Initial commit.
commit 7411bfccbd
@@ -0,0 +1,120 @@
kind: pipeline
type: kubernetes
name: release

trigger:
  branch:
  - master
  - develop

steps:
- name: submodules
  image: alpine/git
  commands:
  - git submodule update --recursive --remote --init

- name: production:build
  when:
    branch:
    - master
  image: docker.io/klakegg/hugo:0.82.0-alpine
  commands:
  - >-
    hugo
    --baseURL=https://hkoerber.de/
    --cleanDestinationDir
    --minify
    --environment production
    --destination ./public/
  - chmod -R o+rX ./public/

- name: preview:build
  image: docker.io/klakegg/hugo:0.82.0-alpine
  when:
    branch:
    - develop
  commands:
  - >-
    hugo
    --baseURL=https://preview.hkoerber.de/
    --cleanDestinationDir
    --minify
    --buildDrafts
    --buildFuture
    --environment preview
    --destination ./public/
  - chmod -R o+rX ./public/

- name: production:image
  image: registry.hkoerber.de/drone-kaniko:latest
  when:
    branch:
    - master
  settings:
    dockerfile: Dockerfile.nginx
    registry: registry.hkoerber.de
    repo: blog
    tags:
    - ${DRONE_COMMIT_SHA}

- name: preview:image
  image: registry.hkoerber.de/drone-kaniko:latest
  when:
    branch:
    - develop
  settings:
    dockerfile: Dockerfile.nginx
    registry: registry.hkoerber.de
    repo: blog
    tags:
    - ${DRONE_COMMIT_SHA}-preview

- name: production:update k8s
  image: alpine/git
  when:
    branch:
    - master
  commands:
  # the explicit setting of the env variables is required because drone does
  # not "export" the environment variables set with "environment:", they are
  # only usable inside the scripts.
  - export GIT_AUTHOR_NAME="Drone"
  - export GIT_AUTHOR_EMAIL="drone@hkoerber.de"
  - export GIT_COMMITTER_NAME="$$GIT_AUTHOR_NAME"
  - export GIT_COMMITTER_EMAIL="$$GIT_AUTHOR_EMAIL"
  - git clone https://code.hkoerber.de/hannes/mycloud mycloud
  - cd mycloud/k8s/manifests/blog
  - "sed -i 's#image: registry.hkoerber.de/blog:.*$#image: registry.hkoerber.de/blog:${DRONE_COMMIT_SHA}#' 50-deployment.yaml"
  # Check if the file actually changed, we're done if it did not. Most likely a
  # pipeline re-run
  - git diff --exit-code --quiet -- 50-deployment.yaml && exit 0 || true
  - git add 50-deployment.yaml
  - >-
    git commit
    -m 'k8s: Update blog container image'
    -m "Triggered in repo $DRONE_REPO by commit $DRONE_COMMIT"
  - git push origin master

- name: preview:update k8s
  image: alpine/git
  when:
    branch:
    - develop
  commands:
  # the explicit setting of the env variables is required because drone does
  # not "export" the environment variables set with "environment:", they are
  # only usable inside the scripts.
  - export GIT_AUTHOR_NAME="Drone"
  - export GIT_AUTHOR_EMAIL="drone@hkoerber.de"
  - export GIT_COMMITTER_NAME="$$GIT_AUTHOR_NAME"
  - export GIT_COMMITTER_EMAIL="$$GIT_AUTHOR_EMAIL"
  - git clone https://code.hkoerber.de/hannes/mycloud mycloud
  - cd mycloud/k8s/manifests/blog-preview
  - "sed -i 's#image: registry.hkoerber.de/blog:.*$#image: registry.hkoerber.de/blog:${DRONE_COMMIT_SHA}-preview#' 50-deployment.yaml"
  # Check if the file actually changed, we're done if it did not. Most likely a
  # pipeline re-run
  - git diff --exit-code --quiet -- 50-deployment.yaml && exit 0 || true
  - git add 50-deployment.yaml
  - >-
    git commit
    -m 'k8s: Update blog preview container image'
    -m "Triggered in repo $DRONE_REPO by commit $DRONE_COMMIT"
  - git push origin master
@@ -0,0 +1,3 @@
/public
/themes
/resources
@@ -0,0 +1,4 @@
FROM nginx:1.18.0

ADD public/ /usr/share/nginx/html/
ADD nginx.server.conf /etc/nginx/conf.d/default.conf
@@ -0,0 +1,50 @@
REGISTRY := registry.hkoerber.de
APPNAME := blog
PUSHURL := $(REGISTRY)/$(APPNAME)

.PHONY: build-production
build-production:
	git diff-index --quiet HEAD || { echo >&2 "Local changes, refusing to build" ; exit 1 ; }
	docker run \
		--rm \
		--net host \
		-v $(PWD):/workdir \
		-w /workdir \
		registry.hkoerber.de/hugo:f216de6b127620641bcaf1d28fe16bf1ea2db884 \
		/app/bin/hugo \
			--baseURL=https://hkoerber.de/ \
			--cleanDestinationDir \
			--minify \
			--destination ./public/
	sudo chown -R $(shell id -u):$(shell id -g) ./public
	sudo chmod -R o+rX ./public

.PHONY: image-production
image-production: build-production
	git diff-index --quiet HEAD || { echo >&2 "Local changes, refusing to build" ; exit 1 ; }
	docker build \
		--file ./Dockerfile.nginx \
		--tag $(REGISTRY)/$(APPNAME):latest \
		--tag $(REGISTRY)/$(APPNAME):$(shell git rev-parse HEAD) \
		.

.PHONY: push-production
push-production: image-production
	docker push $(REGISTRY)/$(APPNAME):latest
	docker push $(REGISTRY)/$(APPNAME):$(shell git rev-parse HEAD)

.PHONY: release
release: push-production

.PHONY: preview
preview:
	docker run \
		--rm \
		--net host \
		-v $(PWD):/workdir \
		-w /workdir \
		registry.hkoerber.de/hugo:f000054616d7789202b06872a6535bcb9fd500c9 \
		hugo serve \
			--watch \
			--buildDrafts
@@ -0,0 +1,16 @@
# Inspirations

# Ideas

- timeline (like https://leerob.io/)
- "views" aka values, methods (like https://dynamicwebpaige.github.io/info/)

General thoughts: https://paulstamatiou.com/about-this-website/

Stats like on the right at https://paulstamatiou.com/about/

Stuff I use: https://paulstamatiou.com/stuff-i-use/

trips and mapbox

this is a good one: https://rymc.io/about/
@@ -0,0 +1 @@
url: "https://blog.hkoerber.de"

@@ -0,0 +1 @@
url: "https://staging.blog.hkoerber.de"
@@ -0,0 +1,31 @@
---
sites:
- fa_icon: fa-map-marker
  name: Ansbach, Germany
- url: "https://www.tradebyte.com/"
  fa_icon: "fa-suitcase"
  name: Tradebyte Software GmbH
- url: "https://github.com/hakoerber"
  fa_icon: "fa-github"
  name: Github
- url: "https://gitlab.com/whatevsz"
  fa_icon: "fa-gitlab"
  name: Gitlab
- url: "https://code.hkoerber.de/explore/projects"
  fa_icon: "fa-code"
  name: Code
- url: "https://www.linkedin.com/in/hannes-k%C3%B6rber-479ab5122"
  fa_icon: "fa-linkedin-square"
  name: LinkedIn
- url: "https://twitter.com/whatevsz"
  fa_icon: "fa-twitter-square"
  name: Twitter
- url: "https://keybase.io/hakoerber"
  fa_icon: "fa-lock"
  name: Keybase
- url: "mailto:hannes.koerber@gmail.com"
  fa_icon: "fa-envelope"
  name: E-Mail
- url: "https://status.haktec.de"
  fa_icon: "fa-plus-circle"
  name: Status
@@ -0,0 +1,6 @@
---
title: "{{ replace .TranslationBaseName "-" " " | title }}"
date: "{{ .Date }}"
description: "About the page"
draft: true
---
@@ -0,0 +1,119 @@
baseURL: http://localhost:1313/
title: Hannes Körber
uglyurls: false
disablePathToLower: true
languageCode: en
canonifyURLs: false
# pygmentsStyle: "monokai"
pygmentsCodefences: true
pygmentsUseClasses: true
MetaDataFormat: yaml

enableRobotsTXT: true

frontmatter:
  date:
  - date

markup:
  goldmark:
    renderer:
      unsafe: true

taxonomies:
  tag: tags
  category: categories

related:
  threshold: 50
  includeNewer: true
  toLower: true
  indices:
  - name: tags
    weight: 100
  - name: keywords
    weight: 50
  - name: date
    weight: 10

author: Hannes Körber

params:
  description: "My Blog"
  author: Hannes Körber
  social:
  - name: github
    link: https://github.com/hakoerber
    icon: fa-github
    style: fab
  - name: gitlab
    link: https://gitlab.com/whatevsz
    icon: fa-gitlab
    style: fab
  - name: linkedin
    link: https://www.linkedin.com/in/hannes-koerber
    icon: fa-linkedin
    style: fab
  - name: xing
    link: https://www.xing.com/profile/Hannes_Koerber
    icon: fa-xing
    style: fab
  - name: keybase
    link: https://keybase.io/hakoerber
    icon: fa-key
    style: fas
  # - name: twitter
  #   link: https://twitter.com/whatevsz
  #   icon: fa-twitter
  #   style: fab
  - name: E-Mail
    link: mailto:hannes.koerber@gmail.com
    icon: fa-envelope
    style: fas
    description: Send me a mail!
  - name: RSS
    link: /blog/index.xml
    icon: fa-rss
    style: fas
    description: Follow my blog on RSS!

permalinks:
  blog: /:section/:year/:month/:day/:title/

menu:
  main:
  - name: "Blog"
    url: "/blog/"
    weight: 1

  - name: "Skills"
    url: "/skills/"
    weight: 2

  - name: "Projects"
    url: "/projects/"
    weight: 3

  - name: "About Me"
    url: "/about/"
    weight: 4

  - name: "More"
    identifier: more
    weight: 5

  - name: "Work"
    url: "/work/"
    parent: more
    weight: 2

  - name: "Events"
    url: "/events/"
    parent: more
    weight: 4

  - name: "Talks"
    url: "/talks/"
    parent: more
    weight: 5
@@ -0,0 +1,2 @@
params:
  allow_robots: false

@@ -0,0 +1,2 @@
params:
  allow_robots: false

@@ -0,0 +1,2 @@
params:
  allow_robots: true
@@ -0,0 +1,127 @@
---
---
<div class="hero block">
  <div class="hero-body is-clearfix">
    <div class="columns is-vcentered">
      <div class="column content">
        <h1 class="subtitle is-2 has-text-weight-normal">Welcome!</h1>

        <p class="block has-text-justified">
          Hello, welcome to my homepage! Here you will find some articles (mostly tech),
          some info about myself, and whatever else I am thinking of.
        </p>
      </div>
      <div class="column is-narrow is-hidden-mobile">
        <figure class="image is-pulled-right" style="height:200px;width:200px">
          <img class="is-rounded" alt="Me" src="/assets/images/me.jpg">
        </figure>
      </div>
    </div>
  </div>
</div>

<div class="tile is-ancestor is-vertical">
  <div class="tile">
    <div class="tile is-parent">
      <a href="/blog/" class="tile is-child box is-clearfix">
        <div class="columns is-vcentered is-mobile">
          <div class="column">
            <p class="subtitle is-4">Blog</p>
          </div>
          <div class="column is-narrow">
            <figure class="mr-2 my-2 ml-0 image is-48x48 is-pulled-right">
              <span class="icon is-large">
                <i class="fas fa-3x fa-pen-fancy"></i>
              </span>
            </figure>
          </div>
        </div>
      </a>
    </div>
    <div class="tile is-parent">
      <a href="/about/" class="tile is-child box is-clearfix">
        <div class="columns is-vcentered is-mobile">
          <div class="column">
            <p class="subtitle is-4">About Me</p>
          </div>
          <div class="column is-narrow">
            <figure class="mr-2 my-2 ml-0 image is-48x48 is-pulled-right">
              <span class="icon is-large">
                <i class="fa fa-3x fa-users"></i>
              </span>
            </figure>
          </div>
        </div>
      </a>
    </div>
  </div>

  <div class="tile">
    <div class="tile is-parent">
      <a href="/work/" class="tile is-child box is-clearfix">
        <div class="columns is-vcentered is-mobile">
          <div class="column">
            <p class="subtitle is-4">Work</p>
          </div>
          <div class="column is-narrow">
            <figure class="mr-2 my-2 ml-0 image is-48x48 is-pulled-right">
              <span class="icon is-large">
                <i class="fas fa-3x fa-briefcase"></i>
              </span>
            </figure>
          </div>
        </div>
      </a>
    </div>
    <div class="tile is-parent">
      <a href="/skills/" class="tile is-child box is-clearfix">
        <div class="columns is-vcentered is-mobile">
          <div class="column">
            <p class="subtitle is-4">Skills</p>
          </div>
          <div class="column is-narrow">
            <figure class="mr-2 my-2 ml-0 image is-48x48 is-pulled-right">
              <span class="icon is-large">
                <i class="fas fa-3x fa-university"></i>
              </span>
            </figure>
          </div>
        </div>
      </a>
    </div>
    <div class="tile is-parent">
      <a href="/projects/" class="tile is-child box is-clearfix">
        <div class="columns is-vcentered is-mobile">
          <div class="column">
            <p class="subtitle is-4">Projects</p>
          </div>
          <div class="column is-narrow">
            <figure class="mr-2 my-2 ml-0 image is-48x48 is-pulled-right">
              <span class="icon is-large">
                <i class="fas fa-3x fa-keyboard"></i>
              </span>
            </figure>
          </div>
        </div>
      </a>
    </div>
  </div>
  <div class="tile">
    <div class="tile is-parent">
      <a href="/about/" class="tile is-child box is-clearfix">
        <div class="columns is-vcentered is-mobile">
          <div class="column">
            <p class="subtitle is-4">Contact</p>
          </div>
          <div class="column is-narrow">
            <figure class="mr-2 my-2 ml-0 image is-48x48 is-pulled-right">
              <span class="icon is-large">
                <i class="fas fa-3x fa-phone"></i>
              </span>
            </figure>
          </div>
        </div>
      </a>
    </div>
  </div>
</div>
@@ -0,0 +1,176 @@
---
---

<div class="columns is-centered">
  <div class="column">
    <h1 class="subtitle is-3 has-text-weight-normal">About Me</h1>
    <hr>
    <p>
      I'm Hannes Körber, a technology enthusiast currently living in Ansbach, Germany.
    </p>
  </div>
  <div class="column is-narrow is-flex" style="justify-content: center;">
    <div class="image ml-5 my-5">
      <img class="is-rounded" src="/assets/images/me.jpg" alt="Me" style="height:200px;width:200px;min-width:200px;">
    </div>
  </div>
</div>

<hr>

<h2 class="subtitle is-3 has-text-weight-normal">Why I do what I am doing</h2>

<div class="has-text-justified">
  <p>
    I started working with computers when I was around ten years old. In the beginning, I mainly used them for gaming, but I got more and more interested in the internals --- how a computer actually works.
  </p>

  <p>
    In school, I started programming (Visual Basic and C#) and was completely blown away that I could TELL the computer what to do, whatever it was. I then began building my own computers, and after the German "Abitur" (comparable to a high school diploma), I started studying Information and Communications Technology at Friedrich-Alexander-Universität Erlangen-Nürnberg.
  </p>

  <p>
    During my university years, I first came into contact with Linux. It was like discovering computers all over again. With Linux, I was free to do everything I wanted with my computer. A few months after my first contact with Linux, I abandoned Windows for good and have not looked back. I quickly learned everything I could about Linux and computer science in general. By choosing computer science courses over electrical engineering courses (which I still like and pursue as a hobby), I decided on my career path: Information Technology.
  </p>

  <p>
    During my mandatory internship, I worked at Tradebyte Software GmbH, a startup-turned-medium-sized company offering SaaS solutions for eCommerce in Ansbach. After my internship, I stayed on as a working student while finishing my master's thesis and started working full time right after graduation (OK, a one-month holiday in New Zealand was necessary first!).
  </p>
</div>

<hr>

<h2 class="subtitle is-3 has-text-weight-normal">What I do in my free time</h2>

<div class="has-text-justified">
  <p>
    I once read somewhere that you should have one hobby in each of the following three categories:
  </p>

  <ul>
    <li>A <b>physical hobby</b>, where you do some physical activity, preferably outside</li>
    <li>A <b>creative hobby</b>, where you create something new</li>
    <li>A hobby that makes you <b>money</b></li>
  </ul>

  <p>
    Well, the last one comes most naturally to me: I take care of my own
    private "cloud" that encompasses a few services that, for me, replace Google, Dropbox, etc.
    I use this to keep up to date on a lot of technologies that I cannot work with in my
    day-to-day job. So it indirectly makes me money by increasing my market value.
  </p>

  <hr>

  <div class="columns is-variable is-8">
    <div class="column">
      <h3 class="subtitle is-4 has-text-weight-normal">Sports</h3>

      <p>
        For a physical hobby, I do not have a single one, or even one I focus on; rather, I do
        numerous different things. Quantity over quality, if you want:
      </p>

      <div class="has-text-left">
        <ul>
          <li>Cycling, from multi-day {{< myhighlight >}}Bike touring{{< /myhighlight >}} to {{< myhighlight >}}Mountain biking{{< /myhighlight >}} and some mild {{< myhighlight >}}Downhill{{< /myhighlight >}}. I bought a new bike in mid-2020 (a Radon Skeen Trail AL 2020, see picture), a mountain bike that has to fill all those roles in one</li>
          <li>{{< myhighlight >}}Hiking{{< /myhighlight >}} and some "entry-level" {{< myhighlight >}}Mountaineering{{< /myhighlight >}}</li>
          <li>{{< myhighlight >}}Kayaking{{< /myhighlight >}}</li>
          <li>Climbing, both {{< myhighlight >}}Bouldering{{< /myhighlight >}} and {{< myhighlight >}}Rock climbing{{< /myhighlight >}}</li>
        </ul>
      </div>
    </div>

    <div class="column is-narrow is-flex my-5" style="flex-direction: column; align-items: center;">
      <figure class="image mb-6">
        <img src="/assets/images/nebelhorn.jpg" alt="Photo from the tour to the Nebelhorn" style="width:240px;">
        {{< figcaption >}}Nebelhorn, Oberstdorf, February 2020{{< /figcaption >}}
      </figure>
      <figure class="image mb-6">
        <img src="/assets/images/kayak-naab.jpg" alt="Photo from the kayaking tour on the Naab" style="width:320px;">
        {{< figcaption >}}Naab, Schwandorf, September 2020{{< /figcaption >}}
      </figure>
      <figure class="image">
        <img src="/assets/images/skeen-trail-al-2020.jpg" alt="The Radon Skeen Trail mountain bike" style="width:240px;">
        {{< figcaption >}}Radon Skeen Trail AL 2020{{< /figcaption >}}
      </figure>
    </div>
  </div>

  <hr>

  <div class="columns is-variable is-8">
    <div class="column">
      <h3 class="subtitle is-4 has-text-weight-normal">Creativity</h3>
      <p>
        The last kind of hobby—the creative one—is the one I have to force myself to do the most.
      </p>
      <p>
        I have been learning the {{< myhighlight >}}Guitar{{< /myhighlight >}}
        since mid-2019. I'm using <a href="https://www.justinguitar.com/">JustinGuitar's course</a> to get to
        know the basics. It's enough for some simple strumming, but don't expect any concerts right now.
        My goal is to be campfire-ready.
      </p>
    </div>
    <div class="column is-narrow is-flex my-5" style="flex-direction: column; align-items: center;">
      <figure class="image">
        <img src="/assets/images/guitar.jpg" alt="Playing guitar" style="width:320px;">
        {{< figcaption >}}Amsterdam, July 2019{{< /figcaption >}}
      </figure>
    </div>
  </div>

  <div class="columns is-variable is-8">
    <div class="column">
      <p>
        When I was younger, I also took some piano lessons with my grandma. After a ten-year hiatus,
        I've been relearning the {{< myhighlight >}}Piano{{< /myhighlight >}}
        since the beginning of 2020, after buying a digital piano.
      </p>
      <p>
        I bought a Yamaha P-45 (see picture). It has weighted keys and feels nearly like a "real" piano.
      </p>
    </div>
    <div class="column is-narrow is-flex my-5" style="flex-direction: column; align-items: center;">
      <figure class="image">
        <img src="/assets/images/yamaha-p45.jpg" alt="The Yamaha P-45 digital piano" style="width:320px;">
        {{< figcaption >}}Yamaha P-45{{< /figcaption >}}
      </figure>
    </div>
  </div>

  <p>
    I also started attending a local church choir to work on my
    {{< myhighlight >}}Singing{{< /myhighlight >}}, but with the
    COVID-19 situation this is currently not possible. Maybe that's better for everyone,
    because no one has to listen to me singing. ;)
  </p>

  <div class="columns is-variable is-8">
    <div class="column">
      <p>
        During Corona, I got back into {{< myhighlight >}}Chess{{< /myhighlight >}}.
        My <a href="https://lichess.org/@/whatevsz">lichess rating</a> is currently around 1450.
        My goal is ~1550, which is around the 50th percentile (the median), meaning I'd have a
        50/50 chance of beating a random opponent on lichess. Unfortunately, I'm mainly
        focusing on puzzles and not really playing longer games, mostly due to time constraints.
      </p>
      <p>
        Chess is both fascinating (due to the rules' simplicity) and frustrating
        (due to having no skill ceiling). It can be a grind to improve, but
        then one day you have this one game that makes it all worth it.
      </p>
    </div>
    <div class="column is-narrow is-flex my-5" style="flex-direction: column; align-items: center;">
      <div class="image">
        <img src="/assets/images/chess.jpg" alt="Chess pieces" style="width:240px;">
      </div>
    </div>
  </div>

  <p>
    This webpage itself could also be considered a creative hobby.
    {{< myhighlight >}}Writing{{< /myhighlight >}} down technical
    stuff makes me internalize complex topics very efficiently.
  </p>
</div>
@@ -0,0 +1,145 @@
---
date: 2015-09-27
excerpt: How this blog is run
tags:
- meta
- hexo
- nginx
title: About This Blog
toc: true
---

I am going to start this blog with a post about the blog itself.

In my opinion, simple text files and command line tools are where it's at, so after some googling, I stumbled upon Hexo and just decided to try it out, because I wanted to get experience with blogging and the accompanying software.

[Hexo](https://hexo.io/) is not actually a complete blogging platform, but simply a static site generator. It takes markdown files and turns them into nice HTML files with CSS and everything. The big advantage of this approach is that you can write those markdown files in whichever way you like.

Also, setup is much easier than with a complete, integrated solution like WordPress. You do not need a database, it's much more lightweight, and you can change to another platform more easily. There are far fewer security concerns, too: static content served by a webserver without any backend for PHP or whatever provides a very small attack surface.

## The content

Right now, I am writing this with vim as a simple markdown file. The following plugins make this a bit easier:

* [Goyo.vim](https://github.com/junegunn/goyo.vim)
* [vim-pencil](https://github.com/reedes/vim-pencil)

The first one makes for distraction-free writing (it simply disables most of the vim UI and resizes the editing area), and the second makes writing prose a breeze. A simple

```vim
:Goyo
:PencilSoft
```

and we are ready to go.

## Hexo

So much about actually writing the text --- but how do we convert this text to a nice webpage? This is where Hexo comes into play. The installation is actually as easy as the website makes it look. Hexo is built with Node.js, so this has to be installed beforehand. Then, the following is enough:

```shell
[~/projects]$ npm install hexo-cli -g
[~/projects]$ hexo init blog
[~/projects]$ cd blog
[~/projects/blog]$ npm install
```

I am not going to show all the stuff Hexo is capable of; you can read through [the official documentation](https://hexo.io/docs/) to get a nice overview.

## Serving the content

In the end, we get some publishable HTML files with CSS and everything in the `public/` subfolder. For "production" use, I am simply going to deploy [nginx](http://nginx.org/) to serve these static files. All of this runs on a dedicated blogging virtual machine on my home server. Because I do not have a publicly reachable IPv4 address (yay CGNAT!), a small [DigitalOcean](https://www.digitalocean.com/) droplet serves as a reverse proxy over a VPN.

Because the content is simply a static web page, there is no need for a database or web framework or anything, and serving the content is super fast! The following nginx configuration is enough, assuming your Hexo root is in `/var/lib/hexo/blog` and is readable by group `hexo`:

```
/etc/nginx/nginx.conf
```
```nginx
user nginx hexo;
worker_processes auto;

events {}

http {
    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log warn;

    default_type application/octet-stream;

    include /etc/nginx/mime.types;
    include /etc/nginx/conf.d/*.conf;

    server {
        listen 80 default_server;
        server_name _;
        root /var/lib/hexo/blog/public;

        location / {
        }
    }
}
```

## Automating the deployment

I'm lazy. Right now, I would have to log into my blogging VM, use the `hexo` command to create a new blog post, actually write the post, and generate the static files manually. No preview to ensure that I didn't screw up markdown syntax, no version control.

Version control is of course always a good idea when you are working with text files. I prefer [git](https://git-scm.com/) as VCS, and I thought about using git as a simple deployment tool for the blog. I envisioned a workflow like this on my local machine:

* run `hexo new "new post"` to create a new post
* edit the post in `source/_posts/`
* commit the new file to git and push it to the blogging server

To achieve this, I initialized a new git repository in the hexo blog directory, created a bare repository on the blogging server, and configured the local repository to push to the remote one.

On the remote server, there is a dedicated `hexo` user with the home directory in `/var/lib/hexo`. In there, the directory `blog.git/` is the bare repository mentioned above, and `blog/` is configured to always check out the `master` branch, so these files can be served by nginx.
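For reference, the initial setup might look something like this (a sketch; the server hostname `blog-server` is made up, adjust to your environment):

```shell
# on the blogging server, as the hexo user
[/var/lib/hexo]$ git init --bare blog.git

# on the local machine
[~/projects/blog]$ git init
[~/projects/blog]$ git add .
[~/projects/blog]$ git commit --message="Initial commit"
[~/projects/blog]$ git remote add server hexo@blog-server:/var/lib/hexo/blog.git
[~/projects/blog]$ git push server master
```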
The following git hook ([more info here](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks)) is used to always update the web root when there are new commits on the `master` branch:

```
/var/lib/hexo/blog.git/hooks/post-receive
```
```shell
#!/usr/bin/env bash

_logfile="$HOME/hook.log"

echo "update hook $(date +%FT%T)" >> "$_logfile"
git --work-tree="$HOME/blog" --git-dir="$HOME/blog.git" checkout master --force &>> "$_logfile"
hexo generate --cwd "$HOME/blog" &>> "$_logfile"
```

Because I don't really care about the commit messages in this case, I also wrote a little bash script on the local machine to automatically make a new commit and push it to the remote server:

```
~/bin/publish-blog
```
```shell
#!/usr/bin/env bash

cd ~/projects/blog || exit 1
git commit --message="Update $(date +%F)"
git push server master
```

This assumes the local blog repository is in `~/projects/blog` and the remote is simply called `server`.

Now, with a simple

```shell
[~/projects]$ git add <post>
[~/projects]$ publish-blog
```

a new post is pushed to the server, the static content is generated, and everything can be accessed over the internet. As everything is in git, a post can easily be removed with `git revert`.
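For example, if the offending post was added in the most recent commit:

```shell
[~/projects/blog]$ git revert --no-edit HEAD
[~/projects/blog]$ git push server master
```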
## Preview server

While editing a post, it is quite helpful to have a local preview of the blog, so you can check that everything looks as it should before publishing. This functionality is already built into Hexo; with the following command, we start a web server on the local machine that shows our blog and automatically updates when we make changes:

```shell
[~]$ (cd ~/projects/blog && hexo server --ip 127.0.0.1)
```

Now you can get a nice preview by going to `http://localhost:4000` in your browser.
@@ -0,0 +1,260 @@
---
date: 2015-09-27
excerpt: Combining Rsyslog, Logstash and Kibana to make nice logging dashboards
tags:
- homelab
- logging
- elk
- elasticsearch
- logstash
- rsyslog
- json
- grok
title: Using The ELK Stack With Rsyslog
toc: true
---

This post will detail my setup that uses [rsyslog](http://www.rsyslog.com/) to send JSON-formatted log messages to an [ELK stack](https://www.elastic.co/webinars/introduction-elk-stack).

## The result

Let's start with an overview of what we get in the end:



## The log structure

The setup uses rsyslog to send two different kinds of logs to the logserver: the good old `syslog`, and logfiles written by applications, for example nginx access logs. Both will be formatted as JSON and sent to the logserver via TCP for further processing. Every message has certain attributes that describe its origin:

* `host`: identifies the host that sent the message; subfields are `ip` and `name`
* `logtype`: can either be `syslog` or `application` and distinguishes a syslog entry from an application logfile
* `content`: the actual log message

`content` can either be a string (in the case of a logfile, this is simply a line in the file) or a dictionary that contains attributes of the message. For syslog, these attributes are:

* `host`: syslog host field
* `severity`: syslog severity
* `facility`: syslog facility
* `tag`: syslog tag
* `message`: syslog message
* `program`: syslog program

On the server side, the `content` attribute can be parsed depending on the application. For example, nginx access logs can be parsed to include response codes, verbs, user agents, and much more.
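To make the structure concrete, here is a made-up syslog message as it would arrive at the logserver (all field values are purely illustrative; the `hostinfo` part is rewritten into the final `host` attribute by the logstash filter shown further down):

```json
{
  "logtype": "syslog",
  "content": {
    "@timestamp": "2015-09-27T14:03:12+02:00",
    "host": "web01",
    "severity": "info",
    "facility": "daemon",
    "tag": "sshd[1234]:",
    "message": "Accepted publickey for hannes from 10.0.0.5",
    "program": "sshd"
  },
  "hostinfo": {
    "name": "web01.lab"
  }
}
```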
All of this makes for easy searching in Kibana. Here are some examples of filters that can be used:

Get all messages from a specific host:

```
host.name:"host.domain"
```

Show all firewall events (note that this is kind of redundant; the first expression can be left out because it is implied by the second):

```
logtype:"application" AND application:"iptables-block"
```

Gather all server-side errors of nginx servers:

```
application:"nginx-access" AND nginx-access.response:[500 TO 599]
```

## Sending JSON with rsyslog

On every server that sends its logs to our logserver, rsyslog is installed and configured to send all logs in the JSON format described above. Of course, local logging is also still done.

Sending data over TCP can be done via the [omfwd](http://www.rsyslog.com/doc/v8-stable/configuration/modules/omfwd.html) output module that is included in rsyslog by default. The configuration looks like this:

```java
action(
  type="omfwd"
  Template="syslog-json"
  Target="logserver.example.com"
  Port="515"
  Protocol="tcp"
)
```

Here we use TCP port 515, because 514 is commonly used for plain syslog. The `Template` directive defines which template we use to format the logs. The template for syslog messages looks like this and must be defined **before** the accompanying `action`:

```java
template(name="syslog-json" type="list") {
  constant(value="{")
  constant(value="\"logtype\":\"") constant(value="syslog" format="json")
  constant(value="\",\"content\":{")
  constant(value="\"@timestamp\":\"") property(name="timegenerated" format="json" dateFormat="rfc3339")
  constant(value="\",\"host\":\"") property(name="hostname" format="json")
  constant(value="\",\"severity\":\"") property(name="syslogseverity-text" format="json")
  constant(value="\",\"facility\":\"") property(name="syslogfacility-text" format="json")
  constant(value="\",\"tag\":\"") property(name="syslogtag" format="json")
  constant(value="\",\"message\":\"") property(name="msg" format="json")
  constant(value="\",\"program\":\"") property(name="programname" format="json")
  constant(value="\"}")
  constant(value=",\"hostinfo\":{")
  constant(value="\"name\":\"") property(name="$myhostname" format="json")
  constant(value="\"}")
  constant(value="}")
}
```

Note that the `host.ip` attribute is missing. It will be added later at the server, because rsyslog does not provide a way to get the IP of the server it is running on (which might be quite difficult to do on servers with multiple interfaces anyway).

The `format="json"` option for the property replacers makes sure that the string is properly quoted if it contains curly braces, for example.

Forwarding logfiles is a bit more complex: for each file, a template and an input module definition are needed, together with a ruleset to bind both to an output module. The input is defined via the [imfile](http://www.rsyslog.com/doc/v8-stable/configuration/modules/imfile.html) module. For an nginx access logfile, it would look like this:

```java
input(type="imfile"
  File="/var/log/nginx/access.log"
  Tag="nginx-access"
  StateFile="-var-log-nginx-access.log.state"
  ruleset="forward-nginx-access"
)
```

The `Tag` can be an arbitrary string and would correspond to the `syslogtag` attribute. Because we are not using syslog for file forwarding, it does not matter much here, but it is required, so it is set to something descriptive.

`StateFile` defines the path to a file that rsyslog uses to keep track of its current position in the file. This is needed to preserve state across reboots or rsyslog daemon restarts; otherwise, every time rsyslog starts, it would forward the **entire** file to our logserver. The value defines the filename, which is kept under `/var/lib/rsyslog/`. Here, we simply use the full path to the logfile, with slashes replaced by hyphens. Anything else would be fine, too, as long as it is unique among all input definitions.
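With the input definition above, the state file therefore ends up at:

```
/var/lib/rsyslog/-var-log-nginx-access.log.state
```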
Lastly, `ruleset` determines which ruleset to bind this input to. This will be explained further down.

The template that is used to pack the information into JSON looks like this:

```java
template(name="nginx-access-json" type="list") {
  constant(value="{")
  constant(value="\"logtype\":\"") constant(value="application" format="json")
  constant(value="\",\"application\":\"") constant(value="nginx-access" format="json")
  constant(value="\",\"content\":{")
  constant(value="\"message\":\"") property(name="msg" format="json")
  constant(value="\"}")
  constant(value=",\"hostinfo\":{")
  constant(value="\"name\":\"") property(name="$myhostname" format="json")
  constant(value="\"}")
  constant(value="}")
}
```

The `action` that sends the logs to the logging server looks the same for both syslog and file forwarding. But because each file action only applies to a single file, a `ruleset` needs to be defined to bind the `action` and the `template` together:

```java
ruleset(name="forward-nginx-access") {
  action(
    type="omfwd"
    Template="nginx-access-json"
    Target="logserver.example.com"
    Port="515"
    Protocol="tcp"
  )
}
```

## Receiving and parsing logs with logstash

Now that the logs are sent in a nice format, the logging server has to be configured to receive and store them. This is done using [logstash](https://www.elastic.co/products/logstash), which is part of the ELK stack.

The logstash configuration file is separated into three parts: input, filter, and output. The input part is configured to simply listen on TCP port 515 for messages, and logstash can automatically parse the JSON it receives:

```
/etc/logstash/conf.d/10_listen_tcp_json.conf
```

```ruby
input {
  tcp {
    type => "log_json"
    port => 515
    codec => json
  }
}
```

`type` is an arbitrary string that will later be used to distinguish JSON logs from other inputs (logstash could also listen for syslog on port 514, for example).

```
/etc/logstash/conf.d/50_filter.conf
```
```ruby
filter {
  if [type] == "log_json" {
    # complete the host attribute to contain both hostname and IP
    mutate {
      add_field => {
        "host[name]" => "%{[hostinfo][name]}"
        "host[ip]" => "%{host}"
      }
      remove_field => "hostinfo"
    }

    # remove timestamp in syslog
    if [logtype] == "syslog" {
      mutate {
        remove_field => "content[@timestamp]"
      }
    }

    # application-specific parsing
    if [logtype] == "application" {
      if [application] == "nginx-access" {
        grok {
          match => { "content[message]" => "%{NGINXACCESS}" }
          patterns_dir => "./patterns"
          remove_field => "content[message]"
        }
        mutate {
          rename => {
            "clientip" => "[content][clientip]"
            "ident" => "[content][ident]"
            "auth" => "[content][auth]"
            "timestamp" => "[content][timestamp]"
            "request" => "[content][request]"
            "httpversion" => "[content][httpversion]"
            "response" => "[content][response]"
            "bytes" => "[content][bytes]"
            "referrer" => "[content][referrer]"
            "agent" => "[content][agent]"
            "verb" => "[content][verb]"
          }
          rename => {
            "content" => "nginx-access"
          }
        }
      }
    }
  }
}
```

This big rename for nginx access logs is necessary because logstash dumps all parsed variables into the top level of the event, from where they have to be moved into the `content` field.

Now that the logs are formatted, they can be shipped to a local [elasticsearch](https://www.elastic.co/products/elasticsearch) instance:

```
/etc/logstash/conf.d/80_output_elasticsearch.conf
```
```ruby
output {
  elasticsearch {
    host => localhost
    protocol => transport
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
```

By default, logstash puts all logs into the same elasticsearch index, namely `logstash`. By using a separate index for each day, old logs can be more easily deleted by simply removing old indices.
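With daily indices, cleanup is a single HTTP call against elasticsearch (the date is made up; this assumes the default port 9200 on localhost):

```shell
[~]$ curl -XDELETE 'http://localhost:9200/logstash-2015.08.01'
```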
Grok is used for parsing the logfiles. There are several patterns shipped with logstash by default, which can be found [here](https://github.com/elastic/logstash/tree/v1.4.1/patterns). Because there is no pattern for nginx, the following custom one is used:

```
/opt/logstash/patterns/nginx
```
```
NGUSERNAME [a-zA-Z\.\@\-\+_%]+
NGUSER %{NGUSERNAME}
NGINXACCESS %{IPORHOST:clientip} %{NGUSER:ident} %{NGUSER:auth} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:response} (?:%{NUMBER:bytes}|-) (?:"(?:%{URI:referrer}|-)"|%{QS:referrer}) %{QS:agent}
```

Kibana should pick up the data automatically, so you get the result seen at the beginning.
@@ -0,0 +1,328 @@
---
date: 2015-12-29
excerpt: Using a local package repository and automating updates of a dozen machines
tags:
- homelab
- package
- yum
- saltstack
title: Homelab CentOS Package Management
toc: true
---

Keeping a dozen virtual machines up-to-date can be quite a task. In this post, I will show how to do it automatically and efficiently using yum-cron, a local mirror built with rsync, and saltstack.

I will also describe the setup of a "custom" RPM repository to distribute packages built with the awesome [fpm](https://github.com/jordansissel/fpm).

## Automatic updates with yum-cron

Downloading and applying updates with yum can be automated using yum-cron, which is more or less a wrapper around `yum` that runs periodically via `cron` (hence the name). The setup is quite straightforward --- the good old package+config+service triangle --- and can be automated using salt:

```yaml
yum-cron:
  pkg.installed:
    - name: yum-cron

  service.running:
    - name: yum-cron
    - enable: True
    - require:
      - pkg: yum-cron

  file.managed:
    - name: /etc/yum/yum-cron.conf
    - user: root
    - group: root
    - mode: 644
    - source: salt://files/yum-cron.conf
    - require:
      - pkg: yum-cron
    - watch_in:
      - service: yum-cron
```

The deployed configuration file (`/etc/yum/yum-cron.conf`) looks like this:

```ini
[commands]
update_cmd = default
download_updates = yes
apply_updates = yes
random_sleep = 360
```

My actual "production" state can be found [here](https://github.com/hakoerber/salt-states-parameterized/tree/master/package/autoupdate), and a role tying it all together is available [here](https://github.com/hakoerber/salt-roles-parameterized/blob/master/autoupdate.sls), but the above still does what it should.

That's it. yum-cron will update the system nightly at a random time between 0:00 and 6:00.

## The local package repository mirror

Now all servers pull their updates every day and apply them automatically. There is one problem, though: every server contacts some upstream mirror on the internet, which puts unnecessary strain on their connection and ours. To remedy this, we will create a local mirror that is updated regularly and that all other servers can pull packages from.

First, we have to decide which repositories to mirror. Because all servers are exclusively CentOS 7 64-bit boxes, only repositories matching this release and architecture will be used. The default repos enabled after installing CentOS are the following:

* `base`
* `updates`
* `extras`

In addition to this, [EPEL](https://fedoraproject.org/wiki/EPEL) is also mirrored because it contains some important packages.

To make managing and updating the repositories easier, I wrote a small python script called [syncrepo](https://github.com/hakoerber/syncrepo). It reads a configuration file (`/etc/syncrepo.conf` in this example) and synchronizes all repositories defined there. The file format is easy to understand and looks like this:

```json
{
  "base": "/srv/www/packages",
  "repos": {
    "centos/7/os": "ftp.fau.de",
    "centos/7/updates": "ftp.fau.de",
    "centos/7/extras": "ftp.fau.de",
    "epel/7/x86_64": "ftp.fau.de"
  }
}
```

`base` refers to the local filesystem path where all files will be stored. `repos` maps the paths of the repositories to the upstream mirrors they will be downloaded from.

The mirrored repositories will use about 27GB of space, so we have to make sure there is plenty of space available. This is done by mounting an NFS export from the NAS there.
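For example, with a hypothetical NAS host `nas.lab` exporting `/export/packages`, the corresponding `/etc/fstab` entry could look like this:

```
nas.lab:/export/packages  /srv/www/packages  nfs  defaults  0 0
```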
Now it's time for a first sync:

```shell
[~]$ sudo mkdir -p /srv/www/packages/centos/7/
[~]$ sudo mkdir -p /srv/www/packages/epel/7/
[~]$ sudo /usr/local/bin/syncrepo --config /etc/syncrepo.conf
```

This simply executes the following four commands (one for each repo):

```
rsync $OPTIONS rsync://ftp.fau.de/centos/7/extras/ /srv/www/packages/centos/7/extras

rsync $OPTIONS rsync://ftp.fau.de/centos/7/updates/ /srv/www/packages/centos/7/updates

rsync $OPTIONS rsync://ftp.fau.de/centos/7/os/ /srv/www/packages/centos/7/os

rsync $OPTIONS rsync://ftp.fau.de/epel/7/x86_64/ /srv/www/packages/epel/7/x86_64
```

with `$OPTIONS` being

```bash
--hard-links --out-format "%t %i %n%L " --stats --recursive --update --delete --delete-after --delay-updates
```

to make updates as atomic as possible and give some sensible output.

This is going to take a while. In the meantime, we can set up a webserver to serve those files over HTTP. I'm going to use nginx here. This can be done using the `repomirror` salt role from the [salt role collection](https://github.com/hakoerber/salt-roles) ([direct link](https://raw.githubusercontent.com/hakoerber/salt-roles/master/repomirror.sls)):

```shell
[~]$ sudo salt-call state.sls roles.repomirror
```

This installs nginx to serve `/srv/www/packages`, configures iptables, and sets up rsync and logstash. Yay salt!

For reference, here is an equivalent `nginx.conf`:

```nginx
user nginx;

events {}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80 default_server;
        server_name _;
        root /srv/www/packages;

        location / {
            autoindex on;
        }
    }
}
```

If you are using the salt role, nginx should already be running; otherwise,

```shell
[~]$ sudo systemctl start nginx
```

will start it manually. Note that when `/srv/www/packages` is an NFS mount and SELinux is enabled, a boolean needs to be set to allow nginx to use NFS:

```shell
[~]$ sudo setsebool -P httpd_use_nfs=1
```

Now, once `syncrepo` is done, the server is a functioning mirror, ready to distribute packages to clients. The last thing to do is automating the repo sync at a certain interval. Cron is perfect for this. The following line in `/etc/crontab` will run the sync each day at 22:00, with a random delay of up to one hour, which gives it enough time to finish before the clients retrieve their updates (between 0:00 and 6:00, as mentioned above):

```
0 22 * * * root perl -le 'sleep rand 60*60' ; /usr/local/bin/syncrepo --config /etc/syncrepo.conf >>/var/log/syncrepo.log 2>&1
```

That's it. The next thing will be configuring the other servers to use our new local mirror.

## Using the local mirror on the other servers

This task is quite simple: the `baseurl` setting has to be changed to point to the local mirror for all repositories in `/etc/yum.repos.d`. Changing

```
baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
```

or

```
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
```

to

```
baseurl=http://pkg01.lab/centos/$releasever/os/$basearch/
```

does the trick for the `base` repo, and the other repositories are similar. Of course, it is super tedious to do this for every single server, so let's use salt to automate the process. The `pkgrepo` state makes this possible:

```yaml
repo-base:
  pkgrepo.managed:
    - name: base
    - humanname: CentOS-$releasever - Base
    - baseurl: http://pkg01.lab/centos/$releasever/os/$basearch/
```

The tricky part is integrating this with reclass. First, the file for `pkg01.lab` has to be extended to define all exported repositories:

```yaml
applications:
- roles.localrepo

parameters:
  applications:
    localrepo:
      domain: "lab"
      repos:
        base:
          url: "centos/$releasever/os/$basearch"
        updates:
          url: "centos/$releasever/updates/$basearch"
        extras:
          url: "centos/$releasever/extras/$basearch"
        epel:
          url: "epel/$releasever/$basearch"
```

Then, the mirror will be "advertised" to all servers in the `.lab` domain:

```yaml
parameters:
  domain:
    lab:
      applications:
        localrepo:
          servers: $<aggregate_list("lab" in node.get('domain', {}).keys() and node.get('applications', {}).get('localrepo', None) is not None; dict(name=node['hostname'], repos=node['applications']['localrepo'].get('repos', [])))>
```

Now, the `repos` role (from [here](https://github.com/hakoerber/salt-roles), [direct link](https://raw.githubusercontent.com/hakoerber/salt-roles/master/repos.sls)) parses this information and passes it to the relevant states.

This *would* even work with multiple mirrors exporting different repositories (the logic is there) to form a kind of high-availability mirror cluster, but it fails because the `pkgrepo` state ignores all URLs for `baseurl` except the first one, even though multiple URLs are supported by yum (see `yum.conf(5)`). Anyway, when using only a single mirror (which should be enough), it works as intended.
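For reference, yum itself would accept a second, hypothetical mirror `pkg02.lab` as an additional whitespace-separated URL in the repo file, like this:

```
baseurl=http://pkg01.lab/centos/$releasever/os/$basearch/
        http://pkg02.lab/centos/$releasever/os/$basearch/
```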
## A custom repository for non-default packages

Installing software manually is always a bit of a bad habit in an automated environment. Updating and uninstalling are a pain, as is keeping an overview of what is installed where. For this reason, installing from packages should be preferred whenever possible. The problem is that building packages is a nightmare (at least for RPMs and DEBs). This is what [fpm](https://github.com/jordansissel/fpm) aims to solve, by providing a way to create packages as easily as possible. This, together with a custom repo to distribute the packages, makes management of custom software much easier. It works like this:

First, a new repository is needed, called `custom`, that contains --- well --- custom packages. On our mirror server:

```shell
[~]$ sudo mkdir -p /srv/www/packages/custom/centos/7/x86_64/
```

Now we need something to put there. As an example, let's package the `syncrepo` script mentioned above. We need a user for building packages (building as root is evil™), and we have to install fpm:

```shell
[~]$ sudo useradd -d /var/build -m build
[~]$ sudo yum install -y ruby-devel gcc rpm-build createrepo
[~]$ sudo -su build

build[~]$ cd ~
build[~]$ gem install fpm
```

Now, set up the directory structure for building the package and get the code:

```shell
build[~]$ mkdir syncrepo
build[~]$ mkdir syncrepo/package
build[~]$ mkdir syncrepo/upstream
build[~]$ cd syncrepo/upstream
build[~/syncrepo/upstream]$ git clone https://github.com/hakoerber/syncrepo
```

A Makefile is used to call fpm:

```makefile
NAME=syncrepo
VERSION=1.0
DESCRIPTION="Script to create and maintain a local yum package repository"
URL=https://github.com/hakoerber/syncrepo

.PHONY: package
package:
	(cd upstream && git pull origin master)
	fpm \
		-t rpm \
		-s dir \
		--package ./package/ \
		--name $(NAME) \
		--version $(VERSION) \
		--description $(DESCRIPTION) \
		--url $(URL) \
		--force \
		--depends rsync \
		--depends "python34" \
		--exclude "*/.git" \
		--config-files /etc/ \
		./upstream/syncrepo=/usr/bin/ \
		./upstream/repos.example=/etc/syncrepo.conf
```

A simple

```shell
build[~/syncrepo]$ make
```

will now build the package and put it into the `package` directory. Nearly done! The only thing left is to make the package available over HTTP. First, it has to be copied into our custom repository:

```shell
[~]$ sudo cp /var/build/syncrepo/package/syncrepo-1.0-1.x86_64.rpm /srv/www/packages/custom/centos/7/x86_64/
```

The last thing that has to be done on the server is building the repository metadata with `createrepo` (which was installed above):

```shell
[~]$ sudo createrepo -v --no-database /srv/www/packages/custom/centos/7/x86_64/
```

To make the other servers use the repo, they have to know about it. Let's make salt do this. First, we again have to "advertise" the repo in reclass:

```yaml
parameters:
  applications:
    localrepo:
      domain: "lab"
      repos:
        ...
        custom:
          url: "custom/centos/$releasever/$basearch"
```

After the next salt run, all servers will be able to access the custom repo, and we can install `syncrepo` the "clean" way with

```shell
[~]$ sudo yum install -y syncrepo
```

## Conclusion

That's all! Now, every server in the lab gets all its packages from a central, always up-to-date mirror, which speeds up downloading and is much nicer to the upstream mirrors. Also, custom RPMs can be made available to all servers to easily distribute custom or self-maintained software.
(File diff suppressed because it is too large.)
@ -0,0 +1,244 @@
|
|||
---
|
||||
date: 2017-08-19
|
||||
excerpt: Using the right tool for the right job
|
||||
tags:
|
||||
- ansible
|
||||
- puppet
|
||||
- config
|
||||
title: Ansible or Puppet? Why not both!
|
||||
toc: true
|
||||
---
|
||||
|
||||
# Introduction
|
||||
|
||||
At the [7th DevOps Camp](https://devops-camp.de/devops-camp-12-14-mai-2017/) in May 2017 I listened to a very interesting talk by [Frank Prechtel](https://twitter.com/frankprechtel) and [Andreas Heidoetting](https://www.xing.com/profile/andreas_heidoetting) called "Welche Software für Infrastructure as Code? --- Puppet vs. Chef vs. Ansible vs. Saltstack - welches Tool für welchen Einsatzzweck?" (Which software for Infrastructure as Code? --- Puppet vs. Chef vs. Ansible vs. Saltstack - which tool for what use case?).
|

In that talk, they arranged those tools along two axes:

* **Procedural** vs. **Declarative**
* **Client-Server** vs. **Client-only**

Ansible and Puppet differ on both of those axes, with Ansible being a procedural, client-only system and Puppet being a declarative system following a client-server architecture.

In this post, I will focus on the first axis, and on how the differences lead to each system being stronger than the other in certain problem domains.

First, let's look at the concept of "state" as it applies to configuration management.
# State

Configuration management is all about managing state. The state encompasses everything you can think of on the target system: contents and permissions of files, what processes are running, what packages are installed, users, groups, network configuration, device management, and much more. You get the idea.

Now, you generally have a number of "target" states: these are simply the different server roles you have. You might have web servers, database servers, storage servers, and so on. Every server in one of those roles differs only minimally from other servers of the same role. For example, network configuration might be slightly different (IP addresses).
It is in your interest to always know which state a system is in. We call the discrepancy between the actual state and the declared state "configuration drift". Configuration drift can generally be introduced in two different ways:

* Your actual state changes, and those changes were not introduced by the management system. This is usually the case when you change something on your systems manually (i.e. you SSH into them).

* Your declared state changes without those changes being applied to the systems. This happens when you neglect to do a configuration run after changing your configuration system.

The first point can be remedied by making the declared states as all-encompassing and as flexible as possible. When it's easier to go through your configuration management system to make changes --- even small ones --- there is no need to SSH into a box.

To remedy the second point, you need to apply all changes in the declared state to your systems as easily as possible, preferably automatically. Setting up a CI pipeline that runs your configuration management on every commit to the configuration repository makes sure that configuration drift does not creep in.
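
As a sketch of the idea (deliberately independent of any particular CI product, and with made-up paths): even a plain git hook on the central configuration repository is enough to trigger a run on every push.

```shell
#!/bin/sh
# post-receive hook on the config repository (illustrative sketch;
# /srv/config* are placeholder paths, and the apply command depends on your tool)
git --work-tree=/srv/config --git-dir=/srv/config.git checkout -f master
# apply the new declared state right away, e.g. with ansible:
ansible-playbook -i /srv/config/inventory /srv/config/site.yml
```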

To sum it up, any configuration management system benefits from the following:

* Broad scope, i.e. you can tune every parameter of the managed system
* Flexibility, i.e. you can easily adapt the system to new requirements (new things to manage)
* Speed, i.e. applying the declared state takes as little time as possible

# Procedural vs. Declarative

Now that we have a good idea of what "state" is, we can draw the distinction between the procedural and the declarative approach.

In the context of configuration management systems, "procedural" vs. "declarative" relates to *how* the system brings the managed entity (most often a server) into a new state. So it's not about what you get in the end (state-wise), but about the way there.

The descriptions here are very theoretical and do not apply cleanly to the real world (spoiler: no configuration management system fits perfectly into one of those categories). Nevertheless, thinking about those two extremes helps with understanding the strengths of each system (more on that later).

## Declarative

A declarative approach means we have to *declare* (duh) the state we want to have on the system (often in some kind of DSL), and the configuration management system's task is to transition whatever state it finds into the declared state.

We define the target state (green), and do not have to care about whatever state is currently on the system, nor about state transitions. This is all in the tool's hands.
```viz-dot
rank = same
rankdir = LR
ranksep = 1.5
margin = 0.5

node [
  color = black
  fontsize = 14
  shape = box
  fontname = sans
  margin = "0.5,0.3"
]

edge [
  color = black
  fontsize = 14
  shape = plaintext
  fontname = sans
  style = dashed
]

"target state" [
  fillcolor = green
  style = filled
]

"state 1" -> "target state"
"state 2" -> "target state"
"state 3" -> "target state"
```

The cool thing about the declarative approach is that it scales linearly with the number of states: when you introduce a new system with a new target state, you only have to write one declaration for that system, and you're done. It also gives you a nice sense of confidence: when our declaration is sufficiently comprehensive, we can be certain that our system is in line with our configuration.

The big problem with that approach is the complexity it brings to the tool itself. The tool has to analyze each resource it is expected to bring into the desired state and figure out the steps it has to take. This might not directly impact you (it's more of a problem for the people writing the config management tool), but this complexity might (and does, in my experience) leak into the use of that system through leaky abstractions.
Also, to be really useful, a declarative configuration management system needs to be all-encompassing. Let me tell you why.

You might have experienced the following scenario: assume you have some kind of `conf.d` directory. This is a common way to split the configuration of a program into several files. Two examples that come to mind are cron (`/etc/cron.d`) and rsyslog (`/etc/rsyslog.d`). There are usually three ways configuration files might end up in such a directory:

* defaults, installed with the package itself
* files from other packages (this is done extensively with `/etc/logrotate.d`)
* files from your configuration management system

To actually be sure that, regardless of what's currently in that directory, you end up with exactly the files you want, your configuration management tool has to "take over" that whole directory, as the sketch below illustrates.
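
Conceptually, "taking over" a directory means removing everything the tool does not know about before putting the managed files in place. Stripped of any particular tool's syntax, it boils down to something like this (a sketch with made-up file names):

```shell
# remove every file in /etc/cron.d that is not part of the managed set ...
find /etc/cron.d -type f ! -name 'managed-*' -delete
# ... then install the managed files with known content and permissions
install -m 0644 files/managed-backup.cron /etc/cron.d/managed-backup
```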

Similarly, a truly declarative tool would remove all packages from a system that it does not know about. To turn it around, this means that you have to *tell* the system about all packages you want installed.

Now, sometimes this is exactly what you want (you actually want those superfluous packages gone, and sometimes you can split a `conf.d` directory into multiple ones), but this case nevertheless shows that the default mode of a declarative tool is to "own" the system.
Pros:

* Independence from current states makes it suitable for heterogeneous environments
* Confidence in the target state
* Scales linearly with the number of different states
* Idempotence "built-in"

Cons:

* The tools need to be quite heavy and complex
* To leverage the whole power, you need to declare your whole system and let the configuration management system "own" it

## Procedural

A procedural system simply applies a set of predefined state transitions (green) to reach a new state:
```viz-dot
rank = same
rankdir = LR
ranksep = 1.5
margin = 0.5

node [
  color = black
  fontsize = 14
  shape = box
  fontname = sans
  margin = "0.5,0.3"
]

edge [
  color = green
  fontsize = 14
  shape = plaintext
  fontname = sans
  style = dashed
]

"state" -> "new state"
```

With the state transition being predefined, the new state depends on the old state. As long as you can be sure of the state of the system, this is not an issue. But as soon as you have any configuration drift, for example introduced by manual intervention, you have a new state and therefore need a new state transition:
```viz-dot
rank = same
rankdir = LR
ranksep = 1.5
margin = 0.5

node [
  color = black
  fontsize = 14
  shape = box
  fontname = sans
  margin = "0.5,0.3"
]

edge [
  color = green
  fontsize = 14
  shape = plaintext
  fontname = sans
  style = dashed
]

"state 1" -> "new state"
"state 2" -> "new state"
```

As you can see, the more initial states you have, the more state transitions you have to maintain. Now imagine you also have several different target states, and watch the complexity explode:
```viz-dot
rank = same
rankdir = LR
ranksep = 1.5
margin = 0.5

node [
  color = black
  fontsize = 14
  shape = box
  fontname = sans
  margin = "0.5,0.3"
]

edge [
  color = green
  fontsize = 14
  shape = plaintext
  fontname = sans
  style = dashed