mirror of https://gitlab.archlinux.org/archlinux/infrastructure.git synced 2024-04-26 04:05:10 +02:00

Make ansible-lint happy

yaml: truthy value should be one of [false, true] (truthy)
yaml: wrong indentation: expected 4 but found 2 (indentation)
yaml: too few spaces before comment (comments)
yaml: missing starting space in comment (comments)
yaml: too many blank lines (1 > 0) (empty-lines)
yaml: too many spaces after colon (colons)
yaml: comment not indented like content (comments-indentation)
yaml: no new line character at the end of file (new-line-at-end-of-file)
load-failure: Failed to load or parse file
parser-error: couldn't resolve module/action 'hosts'. This often indicates a misspelling, missing collection, or incorrect module path.
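Most of the churn below is mechanical: yamllint's truthy rule wants the YAML 1.2 literals true/false in place of yes/no/True/False, and its indentation rule wants sequence items indented under their parent key. A minimal before/after sketch (hypothetical keys, not taken from this commit):

    # flagged
    enabled: yes        # truthy value should be one of [false, true]
    tags:
      - example         # wrong indentation: expected 4 but found 2

    # accepted
    enabled: true
    tags:
        - example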
This commit is contained in:
Kristian Klausen 2021-02-14 14:05:32 +01:00
parent f99cca5e3b
commit 4112bdf9fd
69 changed files with 784 additions and 787 deletions

View File

@@ -1,8 +1,12 @@
exclude_paths:
- misc
# FIXME: parser-error: couldn't resolve module/action 'hosts'. This often indicates a misspelling, missing collection, or incorrect module path.
- playbooks/tasks
skip_list:
# line too long (x > 80 characters) (line-length)
- 'line-length'
# yaml: too many spaces inside braces (braces)
- 'braces'
# Do not recommend running tasks as handlers
- 'no-handler'
# Do not force galaxy info in meta/main.yml
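The two lists do different jobs: entries in skip_list are still parsed but the named rule is never reported, while exclude_paths keeps ansible-lint from loading the listed trees at all — which is why the unparseable playbooks/tasks files behind the parser-error go into exclude_paths rather than skip_list. A sketch of the distinction, abridged from the config above:

    exclude_paths:      # never loaded, so parse failures cannot fire
    - playbooks/tasks
    skip_list:          # loaded and parsed, the named rule is silenced everywhere
    - 'line-length'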

View File

@@ -11,7 +11,8 @@ ansible-lint:
# Fix syntax-check rule (https://github.com/ansible-community/ansible-lint/issues/1350#issuecomment-778764110)
- sed "s/,hcloud_inventory.py//" -i ansible.cfg
- sed "/^vault_password_file/d" -i ansible.cfg
- ansible-lint
# Fix load-failure: Failed to load or parse file
- ansible-lint $(printf -- "--exclude %s " */*/vault_*)
terraform-validate:
script:
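The command substitution repeats --exclude once per vault file matched by the glob, keeping ansible-lint away from the encrypted vaults that trigger the load-failure. It expands to something like the following (paths hypothetical; the real ones come from the */*/vault_* glob):

    ansible-lint --exclude group_vars/all/vault_borg.yml --exclude host_vars/luna/vault_mail.yml ...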

View File

@@ -222,7 +222,7 @@ arch_users:
ssh_key: foxxx0.pub
shell: /bin/zsh
groups:
- tu
- tu
fukawi2:
name: "Phillip Smith"
ssh_key: fukawi2.pub

View File

@@ -24,10 +24,10 @@ root_ssh_keys:
# run playbook 'playbooks/tasks/reencrypt-vault-key.yml' when this changes
# before running it, make sure to gpg --lsign-key all of the below keys
root_gpgkeys:
- 86CFFCA918CF3AF47147588051E8B148A9999C34 # foutrelis
- 05C7775A9E8B977407FE08E69D4C5AA15426DA0A # freswa
- ECCAC84C1BA08A6CC8E63FBBF22FB1D78A77AEAB # grazzolini
- A2FF3A36AAA56654109064AB19802F8B0D70FC30 # heftig
- E499C79F53C96A54E572FEE1C06086337C50773E # jelle
- 8FC15A064950A99DD1BD14DD39E4B877E62EB915 # svenstaro
- E240B57E2C4630BA768E2F26FC1B547C8D8172C8 # anthraxx
- 86CFFCA918CF3AF47147588051E8B148A9999C34 # foutrelis
- 05C7775A9E8B977407FE08E69D4C5AA15426DA0A # freswa
- ECCAC84C1BA08A6CC8E63FBBF22FB1D78A77AEAB # grazzolini
- A2FF3A36AAA56654109064AB19802F8B0D70FC30 # heftig
- E499C79F53C96A54E572FEE1C06086337C50773E # jelle
- 8FC15A064950A99DD1BD14DD39E4B877E62EB915 # svenstaro
- E240B57E2C4630BA768E2F26FC1B547C8D8172C8 # anthraxx

View File

@@ -295,194 +295,194 @@ arch_users:
- devops
- tu
- multilib
# jgc:
# name: "Jan de Groot"
# ssh_key: jgc.pub
# groups:
# - dev
# - multilib
# - tu
# jleclanche:
# name: "Jerome Leclanche"
# ssh_key: jleclanche.pub
# shell: /bin/zsh
# groups:
# - tu
# jlichtblau:
# name: "Jaroslav Lichtblau"
# ssh_key: jlichtblau.pub
# groups:
# - tu
# jouke:
# name: "Jouke Witteveen"
# ssh_key: jouke.pub
# groups:
# - ""
# jsteel:
# name: "Jonathan Steel"
# ssh_key: jsteel.pub
# groups:
# - tu
# juergen:
# name: "Jürgen Hötzel"
# ssh_key: juergen.pub
# groups:
# - dev
# - multilib
# - tu
# kgizdov:
# name: "Konstantin Gizdov"
# ssh_key: kgizdov.pub
# groups:
# - tu
# kkeen:
# name: "Kyle Keen"
# ssh_key: kkeen.pub
# groups:
# - tu
# - multilib
# lcarlier:
# name: "Laurent Carlier"
# ssh_key: lcarlier.pub
# groups:
# - dev
# - tu
# - multilib
# lfleischer:
# name: "Lukas Fleischer"
# ssh_key: lfleischer.pub
# shell: /bin/zsh
# groups:
# - dev
# - tu
# - multilib
# maximbaz:
# name: "Maxim Baz"
# ssh_key: maximbaz.pub
# groups:
# - tu
# mtorromeo:
# name: "Massimiliano Torromeo"
# ssh_key: mtorromeo.pub
# groups:
# - tu
# muflone:
# name: "Fabio Castelli"
# ssh_key: muflone.pub
# groups:
# - tu
# nicohood:
# name: "NicoHood"
# ssh_key: nicohood.pub
# groups:
# - tu
# pierre:
# name: "Pierre Schmitz"
# ssh_key: pierre.pub
# groups:
# - dev
# - multilib
# - tu
# polyzen:
# name: "Daniel M. Capella"
# ssh_key: polyzen.pub
# groups:
# - tu
# remy:
# name: "Rémy Oudompheng"
# ssh_key: remy.pub
# groups:
# - dev
# - tu
# ronald:
# name: "Ronald van Haren"
# ssh_key: ronald.pub
# groups:
# - dev
# - tu
# sangy:
# name: "Santiago Torres-Arias"
# ssh_key: sangy.pub
# groups:
# - tu
# - docker-image-sudo
# schuay:
# name: "Jakob Gruber"
# ssh_key: schuay.pub
# groups:
# - tu
# - multilib
# scimmia:
# name: "Doug Newgard"
# ssh_key: scimmia.pub
# groups: []
# morganamilo:
# name: "Morgan Adamiec"
# ssh_key: morganamilo.pub
# groups: []
# seblu:
# name: "Sébastien Luttringer"
# ssh_key: seblu.pub
# shell: /bin/zsh
# groups:
# - dev
# - tu
# - multilib
# shibumi:
# name: "Christian Rebischke"
# ssh_key: shibumi.pub
# shell: /bin/zsh
# groups:
# - tu
# - archboxes-sudo
# kpcyrd:
# name: "Kpcyrd"
# ssh_key: kpcyrd.pub
# groups:
# - tu
# spupykin:
# name: "Sergej Pupykin"
# ssh_key: spupykin.pub
# groups:
# - tu
# - multilib
# svenstaro:
# name: "Sven-Hendrik Haase"
# ssh_key: svenstaro.pub
# groups:
# - dev
# - devops
# - tu
# - multilib
# tensor5:
# name: "Nicola Squartini"
# ssh_key: tensor5.pub
# groups:
# - tu
# tpowa:
# name: "Tobias Powalowski"
# ssh_key: tpowa.pub
# groups:
# - dev
# - multilib
# - tu
# wild:
# name: "Dan Printzell"
# ssh_key: wild.pub
# groups:
# - tu
# xyne:
# name: "Xyne"
# ssh_key: xyne.pub
# groups:
# - tu
# yan12125:
# name: "Chih-Hsuan Yen"
# ssh_key: yan12125.pub
# groups:
# - tu
# zorun:
# name: "Baptiste Jonglez"
# ssh_key: zorun.pub
# groups:
# - tu
# jgc:
# name: "Jan de Groot"
# ssh_key: jgc.pub
# groups:
# - dev
# - multilib
# - tu
# jleclanche:
# name: "Jerome Leclanche"
# ssh_key: jleclanche.pub
# shell: /bin/zsh
# groups:
# - tu
# jlichtblau:
# name: "Jaroslav Lichtblau"
# ssh_key: jlichtblau.pub
# groups:
# - tu
# jouke:
# name: "Jouke Witteveen"
# ssh_key: jouke.pub
# groups:
# - ""
# jsteel:
# name: "Jonathan Steel"
# ssh_key: jsteel.pub
# groups:
# - tu
# juergen:
# name: "Jürgen Hötzel"
# ssh_key: juergen.pub
# groups:
# - dev
# - multilib
# - tu
# kgizdov:
# name: "Konstantin Gizdov"
# ssh_key: kgizdov.pub
# groups:
# - tu
# kkeen:
# name: "Kyle Keen"
# ssh_key: kkeen.pub
# groups:
# - tu
# - multilib
# lcarlier:
# name: "Laurent Carlier"
# ssh_key: lcarlier.pub
# groups:
# - dev
# - tu
# - multilib
# lfleischer:
# name: "Lukas Fleischer"
# ssh_key: lfleischer.pub
# shell: /bin/zsh
# groups:
# - dev
# - tu
# - multilib
# maximbaz:
# name: "Maxim Baz"
# ssh_key: maximbaz.pub
# groups:
# - tu
# mtorromeo:
# name: "Massimiliano Torromeo"
# ssh_key: mtorromeo.pub
# groups:
# - tu
# muflone:
# name: "Fabio Castelli"
# ssh_key: muflone.pub
# groups:
# - tu
# nicohood:
# name: "NicoHood"
# ssh_key: nicohood.pub
# groups:
# - tu
# pierre:
# name: "Pierre Schmitz"
# ssh_key: pierre.pub
# groups:
# - dev
# - multilib
# - tu
# polyzen:
# name: "Daniel M. Capella"
# ssh_key: polyzen.pub
# groups:
# - tu
# remy:
# name: "Rémy Oudompheng"
# ssh_key: remy.pub
# groups:
# - dev
# - tu
# ronald:
# name: "Ronald van Haren"
# ssh_key: ronald.pub
# groups:
# - dev
# - tu
# sangy:
# name: "Santiago Torres-Arias"
# ssh_key: sangy.pub
# groups:
# - tu
# - docker-image-sudo
# schuay:
# name: "Jakob Gruber"
# ssh_key: schuay.pub
# groups:
# - tu
# - multilib
# scimmia:
# name: "Doug Newgard"
# ssh_key: scimmia.pub
# groups: []
# morganamilo:
# name: "Morgan Adamiec"
# ssh_key: morganamilo.pub
# groups: []
# seblu:
# name: "Sébastien Luttringer"
# ssh_key: seblu.pub
# shell: /bin/zsh
# groups:
# - dev
# - tu
# - multilib
# shibumi:
# name: "Christian Rebischke"
# ssh_key: shibumi.pub
# shell: /bin/zsh
# groups:
# - tu
# - archboxes-sudo
# kpcyrd:
# name: "Kpcyrd"
# ssh_key: kpcyrd.pub
# groups:
# - tu
# spupykin:
# name: "Sergej Pupykin"
# ssh_key: spupykin.pub
# groups:
# - tu
# - multilib
# svenstaro:
# name: "Sven-Hendrik Haase"
# ssh_key: svenstaro.pub
# groups:
# - dev
# - devops
# - tu
# - multilib
# tensor5:
# name: "Nicola Squartini"
# ssh_key: tensor5.pub
# groups:
# - tu
# tpowa:
# name: "Tobias Powalowski"
# ssh_key: tpowa.pub
# groups:
# - dev
# - multilib
# - tu
# wild:
# name: "Dan Printzell"
# ssh_key: wild.pub
# groups:
# - tu
# xyne:
# name: "Xyne"
# ssh_key: xyne.pub
# groups:
# - tu
# yan12125:
# name: "Chih-Hsuan Yen"
# ssh_key: yan12125.pub
# groups:
# - tu
# zorun:
# name: "Baptiste Jonglez"
# ssh_key: zorun.pub
# groups:
# - tu

View File

@@ -9,7 +9,7 @@
- { role: firewalld }
- { role: unbound }
# reconfiguring sshd may break the AUR on luna (unchecked)
#- { role: sshd, tags: ['sshd'] }
# - { role: sshd, tags: ['sshd'] }
- { role: root_ssh }
- { role: borg_client, tags: ["borg"], when: "'borg_clients' in group_names" }
- { role: hardening }

View File

@@ -3,18 +3,18 @@
- name: "prepare postgres ssl hosts list"
hosts: archlinux.org
tasks:
- name: assign ipv4 addresses to fact postgres_ssl_hosts4
set_fact: postgres_ssl_hosts4="{{ [gemini4] + detected_ips }}"
vars:
gemini4: "{{ hostvars['gemini.archlinux.org']['ipv4_address'] }}/32"
detected_ips: "{{ groups['mirrors'] | map('extract', hostvars, ['ipv4_address']) | select() | map('regex_replace', '^(.+)$', '\\1/32') | list }}"
tags: ["postgres", "firewall"]
- name: assign ipv6 addresses to fact postgres_ssl_hosts6
set_fact: postgres_ssl_hosts6="{{ [gemini6] + detected_ips }}"
vars:
gemini6: "{{ hostvars['gemini.archlinux.org']['ipv6_address'] }}/128"
detected_ips: "{{ groups['mirrors'] | map('extract', hostvars, ['ipv6_address']) | select() | map('regex_replace', '^(.+)$', '\\1/128') | list }}"
tags: ["postgres", "firewall"]
- name: assign ipv4 addresses to fact postgres_ssl_hosts4
set_fact: postgres_ssl_hosts4="{{ [gemini4] + detected_ips }}"
vars:
gemini4: "{{ hostvars['gemini.archlinux.org']['ipv4_address'] }}/32"
detected_ips: "{{ groups['mirrors'] | map('extract', hostvars, ['ipv4_address']) | select() | map('regex_replace', '^(.+)$', '\\1/32') | list }}"
tags: ["postgres", "firewall"]
- name: assign ipv6 addresses to fact postgres_ssl_hosts6
set_fact: postgres_ssl_hosts6="{{ [gemini6] + detected_ips }}"
vars:
gemini6: "{{ hostvars['gemini.archlinux.org']['ipv6_address'] }}/128"
detected_ips: "{{ groups['mirrors'] | map('extract', hostvars, ['ipv6_address']) | select() | map('regex_replace', '^(.+)$', '\\1/128') | list }}"
tags: ["postgres", "firewall"]
- name: setup archlinux.org
hosts: archlinux.org
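Both facts are built the same way: map('extract', ...) pulls each mirror's address out of hostvars, select() drops hosts whose address is empty, and regex_replace suffixes the netmask. A runnable sketch of the chain on made-up addresses (hypothetical task, not part of the play):

    - name: illustrate the detected_ips filter chain
      debug:
        msg: "{{ ['192.0.2.10', '', '198.51.100.7'] | select() | map('regex_replace', '^(.+)$', '\\1/32') | list }}"
      # prints ['192.0.2.10/32', '198.51.100.7/32']; select() drops the empty entry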

View File

@@ -10,7 +10,7 @@
- { role: root_ssh }
- { role: certbot }
- { role: nginx }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: True }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: true }
- { role: sudo }
- { role: php_fpm, php_extensions: ['iconv', 'memcached', 'mysqli', 'pdo_mysql'], zend_extensions: ['opcache'] }
- { role: memcached }

View File

@@ -11,7 +11,7 @@
- { role: prometheus_exporters }
- { role: certbot }
- { role: nginx }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: True, mariadb_innodb_buffer_pool_size: '1G' }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: true, mariadb_innodb_buffer_pool_size: '1G' }
- { role: sudo }
- { role: php_fpm, php_extensions: ['iconv', 'memcached', 'mysqli', 'pdo_mysql'], zend_extensions: ['opcache'] }
- { role: memcached }

View File

@@ -10,7 +10,7 @@
- { role: root_ssh }
- { role: certbot }
- { role: nginx }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: True }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: true }
- { role: sudo }
- { role: php_fpm, php_extensions: ['apcu', 'iconv', 'intl', 'mysqli'], zend_extensions: ['opcache'] }
- { role: fluxbb }

View File

@@ -10,7 +10,7 @@
- { role: root_ssh }
- { role: certbot }
- { role: nginx }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: True }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: true }
- { role: sudo }
- { role: php7_fpm, php_extensions: ['mysqli'], zend_extensions: ['opcache'] }
- { role: flyspray }

View File

@@ -13,8 +13,7 @@
gitlab_domain: "gitlab.archlinux.org",
gitlab_primary_addresses: ['159.69.41.129', '[2a01:4f8:c2c:5d2d::1]', '127.0.0.1', '[::1]'],
gitlab_pages_http_addresses: ['116.203.6.156:80', '[2a01:4f8:c2c:5d2d::2]:80'],
gitlab_pages_https_addresses: ['116.203.6.156:443', '[2a01:4f8:c2c:5d2d::2]:443']
}
gitlab_pages_https_addresses: ['116.203.6.156:443', '[2a01:4f8:c2c:5d2d::2]:443']}
- { role: borg_client, tags: ["borg"] }
- { role: prometheus_exporters }
- { role: fail2ban }

View File

@@ -2,6 +2,6 @@
- name: setup Hetzner storagebox account
hosts: u236610.your-storagebox.de
gather_facts: False
gather_facts: false
roles:
- { role: hetzner_storagebox, backup_dir: "backup", backup_clients: "{{ groups['borg_clients'] }}", tags: ["borg"] }

View File

@@ -27,7 +27,7 @@
roles:
- nginx
- rspamd
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: True }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: true }
- { role: prometheus_exporters }
# luna is hosting mailman lists; this postfix role does not cater to this yet
# TODO: make postfix role handle mailman config?

View File

@@ -2,6 +2,6 @@
- name: setup rsync.net account
hosts: prio.ch-s012.rsync.net
gather_facts: False
gather_facts: false
roles:
- { role: rsync_net, backup_dir: "backup", backup_clients: "{{ groups['borg_clients'] }}", tags: ["borg"] }

View File

@@ -3,36 +3,36 @@
- name: prepare local storage directory
hosts: 127.0.0.1
tasks:
- name: create borg-keys directory
file: path="{{ playbook_dir }}/../../borg-keys/" state=directory # noqa 208
- name: create borg-keys directory
file: path="{{ playbook_dir }}/../../borg-keys/" state=directory # noqa 208
- name: fetch borg keys
hosts: borg_clients
tasks:
- name: fetch borg key
command: "/usr/local/bin/borg key export :: /dev/stdout"
register: borg_key
changed_when: "borg_key.rc == 0"
- name: fetch borg key
command: "/usr/local/bin/borg key export :: /dev/stdout"
register: borg_key
changed_when: "borg_key.rc == 0"
- name: fetch borg offsite key
command: "/usr/local/bin/borg-offsite key export :: /dev/stdout"
register: borg_offsite_key
changed_when: "borg_offsite_key.rc == 0"
- name: fetch borg offsite key
command: "/usr/local/bin/borg-offsite key export :: /dev/stdout"
register: borg_offsite_key
changed_when: "borg_offsite_key.rc == 0"
- name: save borg key
shell: gpg --batch --armor --encrypt --output - >"{{ playbook_dir }}/../../borg-keys/{{ inventory_hostname }}.gpg" {% for userid in root_gpgkeys %}--recipient {{ userid }} {% endfor %}
args:
stdin: "{{ borg_key.stdout }}"
chdir: "{{ playbook_dir }}/../.."
delegate_to: localhost
register: gpg_key
changed_when: "gpg_key.rc == 0"
- name: save borg key
shell: gpg --batch --armor --encrypt --output - >"{{ playbook_dir }}/../../borg-keys/{{ inventory_hostname }}.gpg" {% for userid in root_gpgkeys %}--recipient {{ userid }} {% endfor %}
args:
stdin: "{{ borg_key.stdout }}"
chdir: "{{ playbook_dir }}/../.."
delegate_to: localhost
register: gpg_key
changed_when: "gpg_key.rc == 0"
- name: save borg offsite key
shell: gpg --batch --armor --encrypt --output - >"{{ playbook_dir }}/../../borg-keys/{{ inventory_hostname }}-offsite.gpg" {% for userid in root_gpgkeys %}--recipient {{ userid }} {% endfor %}
args:
stdin: "{{ borg_offsite_key.stdout }}"
chdir: "{{ playbook_dir }}/../.."
delegate_to: localhost
register: gpg_offsite_key
changed_when: "gpg_offsite_key.rc == 0"
- name: save borg offsite key
shell: gpg --batch --armor --encrypt --output - >"{{ playbook_dir }}/../../borg-keys/{{ inventory_hostname }}-offsite.gpg" {% for userid in root_gpgkeys %}--recipient {{ userid }} {% endfor %}
args:
stdin: "{{ borg_offsite_key.stdout }}"
chdir: "{{ playbook_dir }}/../.."
delegate_to: localhost
register: gpg_offsite_key
changed_when: "gpg_offsite_key.rc == 0"
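Each host's exported key is fed to gpg on stdin, the Jinja for-loop expands root_gpgkeys into one --recipient flag per fingerprint, and the armored ciphertext is redirected into borg-keys/<inventory_hostname>.gpg. Rendered, the shell line looks roughly like this (schematic; only the first two recipients from the root_gpgkeys list shown):

    gpg --batch --armor --encrypt --output - \
        --recipient 86CFFCA918CF3AF47147588051E8B148A9999C34 \
        --recipient 05C7775A9E8B977407FE08E69D4C5AA15426DA0A \
        > "borg-keys/{{ inventory_hostname }}.gpg"   # key text arrives on stdin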

View File

@@ -30,4 +30,3 @@
- name: upload website
unarchive: src={{ tempdir.path }}/pacman/pacman-{{ pacman_version }}/doc/website.tar.gz dest={{ archweb_dir }}/archlinux.org/pacman mode=0644
delegate_to: archlinux.org

View File

@@ -3,48 +3,48 @@
- name: fetch ssh hostkeys
hosts: all,!rsync_net,!hetzner_storageboxes
tasks:
- name: fetch hostkey checksums
shell: "for type in sha256 md5; do for file in /etc/ssh/ssh_host_*.pub; do ssh-keygen -l -f $file -E $type; done; echo; done"
register: ssh_hostkeys
changed_when: ssh_hostkeys | length > 0
- name: fetch known_hosts
shell: "set -o pipefail && ssh-keyscan 127.0.0.1 2>/dev/null | sed 's#^127.0.0.1#{{ inventory_hostname }}#' | sort"
environment:
LC_COLLATE: C # to ensure reproducible ordering
args:
executable: /bin/bash # required for repro3.pkgbuild.com which is ubuntu and has dash as default shell
register: known_hosts
changed_when: known_hosts | length > 0
- name: fetch hostkey checksums
shell: "for type in sha256 md5; do for file in /etc/ssh/ssh_host_*.pub; do ssh-keygen -l -f $file -E $type; done; echo; done"
register: ssh_hostkeys
changed_when: ssh_hostkeys | length > 0
- name: fetch known_hosts
shell: "set -o pipefail && ssh-keyscan 127.0.0.1 2>/dev/null | sed 's#^127.0.0.1#{{ inventory_hostname }}#' | sort"
environment:
LC_COLLATE: C # to ensure reproducible ordering
args:
executable: /bin/bash # required for repro3.pkgbuild.com which is ubuntu and has dash as default shell
register: known_hosts
changed_when: known_hosts | length > 0
- name: store hostkeys
hosts: localhost
tasks:
- name: store hostkeys
copy:
dest: "{{ playbook_dir }}/../../docs/ssh-hostkeys.txt"
content: "{% for host in query('inventory_hostnames', 'all,!rsync_net,!hetzner_storageboxes,!localhost') | sort %}# {{ host }}\n{{ hostvars[host].ssh_hostkeys.stdout }}\n\n{% endfor %}"
mode: preserve
delegate_to: localhost
- name: store known_hosts
copy:
dest: "{{ playbook_dir }}/../../docs/ssh-known_hosts.txt"
content: "{% for host in query('inventory_hostnames', 'all,!rsync_net,!hetzner_storageboxes,!localhost') | sort %}# {{ host }}\n{{ hostvars[host].known_hosts.stdout }}\n\n{% endfor %}"
mode: preserve
delegate_to: localhost
- name: manually append rsync.net host keys
lineinfile:
path: "{{ playbook_dir }}/../../docs/ssh-known_hosts.txt"
line: "{% for host in query('inventory_hostnames', 'rsync_net') | sort %}# {{ host }}\n{{ hostvars[host].known_host }}\n\n{% endfor %}"
delegate_to: localhost
- name: manually append Hetzner Storageboxes host keys
lineinfile:
path: "{{ playbook_dir }}/../../docs/ssh-known_hosts.txt"
line: "{% for host in query('inventory_hostnames', 'hetzner_storageboxes') | sort %}# {{ host }}\n{{ hostvars[host].known_host }}\n\n{% endfor %}"
delegate_to: localhost
- name: store hostkeys
copy:
dest: "{{ playbook_dir }}/../../docs/ssh-hostkeys.txt"
content: "{% for host in query('inventory_hostnames', 'all,!rsync_net,!hetzner_storageboxes,!localhost') | sort %}# {{ host }}\n{{ hostvars[host].ssh_hostkeys.stdout }}\n\n{% endfor %}"
mode: preserve
delegate_to: localhost
- name: store known_hosts
copy:
dest: "{{ playbook_dir }}/../../docs/ssh-known_hosts.txt"
content: "{% for host in query('inventory_hostnames', 'all,!rsync_net,!hetzner_storageboxes,!localhost') | sort %}# {{ host }}\n{{ hostvars[host].known_hosts.stdout }}\n\n{% endfor %}"
mode: preserve
delegate_to: localhost
- name: manually append rsync.net host keys
lineinfile:
path: "{{ playbook_dir }}/../../docs/ssh-known_hosts.txt"
line: "{% for host in query('inventory_hostnames', 'rsync_net') | sort %}# {{ host }}\n{{ hostvars[host].known_host }}\n\n{% endfor %}"
delegate_to: localhost
- name: manually append Hetzner Storageboxes host keys
lineinfile:
path: "{{ playbook_dir }}/../../docs/ssh-known_hosts.txt"
line: "{% for host in query('inventory_hostnames', 'hetzner_storageboxes') | sort %}# {{ host }}\n{{ hostvars[host].known_host }}\n\n{% endfor %}"
delegate_to: localhost
- name: upload known_hosts to all nodes
hosts: all,!rsync_net,!hetzner_storageboxes
tasks:
- name: upload known_hosts
copy: dest=/etc/ssh/ssh_known_hosts src="{{ playbook_dir }}/../../docs/ssh-known_hosts.txt" owner=root group=root mode=0644
tags: ['upload-known-hosts']
- name: upload known_hosts
copy: dest=/etc/ssh/ssh_known_hosts src="{{ playbook_dir }}/../../docs/ssh-known_hosts.txt" owner=root group=root mode=0644
tags: ['upload-known-hosts']

View File

@@ -12,7 +12,7 @@
- { role: certbot }
- { role: nginx }
- { role: postfix, postfix_relayhost: "mail.archlinux.org" }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: True }
- { role: mariadb, mariadb_query_cache_type: '0', mariadb_innodb_file_per_table: true }
- { role: sudo }
- { role: php_fpm, php_extensions: ['bcmath', 'curl', 'gd', 'iconv', 'intl', 'mysqli', 'sockets', 'zip'], zend_extensions: ['opcache'] }
- { role: memcached }

View File

@@ -22,9 +22,9 @@
- name: start and enable syncrepo unit
systemd:
name: syncrepo_arch32.timer
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
- name: make nginx log dir
file: path=/var/log/nginx/{{ arch32_mirror_domain }} state=directory owner=root group=root mode=0755

View File

@@ -2,4 +2,4 @@
- name: daemon reload
systemd:
daemon-reload: yes
daemon-reload: true
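Only the boolean literal is the fix here; the hyphenated key survives the lint pass because ansible.builtin.systemd accepts daemon-reload as an alias for daemon_reload (assuming current module behavior). The handler could equivalently use the canonical spelling:

    - name: daemon reload
      systemd:
        daemon_reload: true   # canonical name; 'daemon-reload' is an accepted alias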

View File

@@ -32,12 +32,12 @@
- name: configure archive.org client
command: ia configure --username={{ vault_archive_username }} --password={{ vault_archive_password }} creates={{ archive_user_home }}/.config/ia.ini
become: yes
become: true
become_user: "{{ archive_user_name }}"
- name: clone archive uploader code
git: repo=https://github.com/archlinux/arch-historical-archive.git dest="{{ archive_repo }}" version="{{ archive_uploader_version }}"
become: yes
become: true
become_user: "{{ archive_user_name }}"
- name: install system service
@@ -49,6 +49,6 @@
- name: start uploader timer
systemd:
name: archive-uploader.timer
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true

View File

@@ -5,7 +5,7 @@ archmanweb_domain: 'man.archlinux.org'
archmanweb_allowed_hosts: ["{{ archmanweb_domain }}"]
archmanweb_nginx_conf: '/etc/nginx/nginx.d/archmanweb.conf'
archmanweb_repository: 'https://gitlab.archlinux.org/archlinux/archmanweb.git'
#archmanweb_pgp_key: ['932BA3FA0C86812A32D1F54DAB5964AEB9FEDDDC'] # Jakub Klinkovský (lahwaacz)
# archmanweb_pgp_key: ['932BA3FA0C86812A32D1F54DAB5964AEB9FEDDDC'] # Jakub Klinkovský (lahwaacz)
archmanweb_forced_deploy: false
archmanweb_db: 'archmanweb'

View File

@@ -2,7 +2,7 @@
- name: daemon reload
systemd:
daemon-reload: yes
daemon-reload: true
- name: restart archweb memcached
service: name=archweb-memcached state=restarted

View File

@@ -216,9 +216,9 @@
- name: start and enable archweb memcached service and archweb-rsync_iso timer
systemd:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
with_items:
- archweb-memcached.service
- archweb-rsync_iso.timer

View File

@@ -105,9 +105,9 @@
- name: start and enable archwiki timers and services
systemd:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
with_items:
- archwiki-runjobs.timer
- archwiki-prune-cache.timer
@@ -118,7 +118,7 @@
systemd:
name: archwiki-question-updater.service
state: started
daemon_reload: yes
daemon_reload: true
- name: ensure question answer file exists and set permissions
file: state=file path="{{ archwiki_question_answer_file }}" owner=root group=root mode=0644

View File

@@ -25,4 +25,4 @@ aurweb_cache_pkginfo_ttl: '86400'
aurweb_request_limt: '4000'
aurweb_window_length: '86400'
aurweb_memcached_socket: '/run/memcached/aurweb.sock'
aurweb_memcached_memory: 2048
aurweb_memcached_memory: 2048

View File

@@ -2,7 +2,7 @@
- name: daemon reload
systemd:
daemon-reload: yes
daemon-reload: true
- name: restart php-fpm@{{ aurweb_user }}
service: name=php-fpm@{{ aurweb_user }} state=restarted

View File

@@ -104,7 +104,7 @@
- name: Check python module availability
command: "python3 -c 'import aurweb'"
ignore_errors: yes
ignore_errors: true
register: aurweb_installed
tags:
- skip_ansible_lint
@@ -117,14 +117,14 @@
- name: Generate HTML documentation
make:
chdir: "{{ aurweb_dir }}/doc"
chdir: "{{ aurweb_dir }}/doc"
become: true
become_user: "{{ aurweb_user }}"
- name: Generate Translations
make:
chdir: "{{ aurweb_dir }}/po"
target: "install"
chdir: "{{ aurweb_dir }}/po"
target: "install"
become: true
become_user: "{{ aurweb_user }}"
@@ -204,7 +204,7 @@
register: git_config
args:
chdir: "{{ aurweb_git_dir }}"
failed_when: git_config.rc == 2 # FIXME: does not work.
failed_when: git_config.rc == 2 # FIXME: does not work.
tags:
- skip_ansible_lint
@@ -250,33 +250,33 @@
- name: install AUR systemd service and timers
template: src={{ item }}.j2 dest=/etc/systemd/system/{{ item }} owner=root group=root mode=0644
with_items:
- aurweb-git.service
- aurweb-git.timer
- aurweb-aurblup.service
- aurweb-aurblup.timer
- aurweb-memcached.service
- aurweb-mkpkglists.service
- aurweb-mkpkglists.timer
- aurweb-pkgmaint.service
- aurweb-pkgmaint.timer
- aurweb-popupdate.service
- aurweb-popupdate.timer
- aurweb-tuvotereminder.service
- aurweb-tuvotereminder.timer
- aurweb-usermaint.service
- aurweb-usermaint.timer
- aurweb-git.service
- aurweb-git.timer
- aurweb-aurblup.service
- aurweb-aurblup.timer
- aurweb-memcached.service
- aurweb-mkpkglists.service
- aurweb-mkpkglists.timer
- aurweb-pkgmaint.service
- aurweb-pkgmaint.timer
- aurweb-popupdate.service
- aurweb-popupdate.timer
- aurweb-tuvotereminder.service
- aurweb-tuvotereminder.timer
- aurweb-usermaint.service
- aurweb-usermaint.timer
- name: start and enable AUR systemd services and timers
service: name={{ item }} enabled=yes state=started
with_items:
- aurweb-git.timer
- aurweb-aurblup.timer
- aurweb-memcached.service
- aurweb-mkpkglists.timer
- aurweb-pkgmaint.timer
- aurweb-popupdate.timer
- aurweb-tuvotereminder.timer
- aurweb-usermaint.timer
- aurweb-git.timer
- aurweb-aurblup.timer
- aurweb-memcached.service
- aurweb-mkpkglists.timer
- aurweb-pkgmaint.timer
- aurweb-popupdate.timer
- aurweb-tuvotereminder.timer
- aurweb-usermaint.timer
- name: configure sshd
template: src=aurweb_config.j2 dest={{ sshd_includes_dir }}/aurweb_config owner=root group=root mode=0600 validate='/usr/sbin/sshd -t -f %s'

View File

@@ -7,7 +7,7 @@
environment:
BORG_RELOCATED_REPO_ACCESS_IS_OK: "yes"
register: borg_list
ignore_errors: True
ignore_errors: true
loop: "{{ backup_hosts }}"
changed_when: borg_list.stdout | length > 0
@@ -16,7 +16,7 @@
when: borg_list is failed
environment:
BORG_PASSPHRASE: ""
ignore_errors: True # This can sometimes fail if a backup is in progress :/
ignore_errors: true # This can sometimes fail if a backup is in progress :/
loop: "{{ backup_hosts }}"
- name: install convenience scripts
@@ -34,7 +34,7 @@
- name: check whether postgres user exists
command: getent passwd postgres
register: check_postgres_user
ignore_errors: True
ignore_errors: true
changed_when: check_postgres_user.stdout | length > 0
- name: make postgres backup directory

View File

@@ -36,6 +36,6 @@
authorized_key:
user: borg
key: "{{ item.stdout }}"
manage_dir: yes
manage_dir: true
key_options: "command=\"/usr/bin/borg serve --restrict-to-path {{ backup_dir }}/{{ item['item'] }}\",no-pty,no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-user-rc"
with_items: "{{ ssh_keys.results }}"

View File

@@ -18,9 +18,9 @@
- name: activate letsencrypt renewal service
systemd:
name: certbot-renewal.timer
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
- name: open firewall holes for certbot standalone authenticator
ansible.posix.firewalld: service={{ item }} permanent=true state=enabled immediate=yes

View File

@@ -4,17 +4,17 @@
systemd:
name: systemd-networkd
state: restarted
daemon_reload: yes
daemon_reload: true
- name: restart journald
systemd:
name: systemd-journald
state: restarted
daemon_reload: yes
daemon_reload: true
- name: systemd daemon-reload
systemd:
daemon_reload: yes
daemon_reload: true
- name: restart syslog-ng
service: name=syslog-ng@default state=restarted

View File

@@ -91,7 +91,7 @@
sysctl:
name: net.ipv4.tcp_rmem
value: "{{ tcp_rmem }}"
sysctl_set: yes
sysctl_set: true
sysctl_file: /etc/sysctl.d/net.conf
when: tcp_rmem is defined
@@ -99,7 +99,7 @@
sysctl:
name: net.ipv4.tcp_wmem
value: "{{ tcp_wmem }}"
sysctl_set: yes
sysctl_set: true
sysctl_file: /etc/sysctl.d/net.conf
when: tcp_wmem is defined

View File

@@ -1,4 +1,4 @@
---
dbscripts_commit: HEAD
dbscripts_update: yes
dbscripts_update: true
dbscripts_pgp_emails: ['eschwartz@archlinux.org']

View File

@@ -76,7 +76,7 @@
dbscripts_mkdirs:
pathtmpl: '/home/{user}/staging/{dirname}'
permissions: '755'
directories: ['', 'core', 'extra', 'testing', 'staging', 'community', 'community-staging', 'community-testing', 'multilib', 'multilib-staging', 'multilib-testing']
directories: ['', 'core', 'extra', 'testing', 'staging', 'community', 'community-staging', 'community-testing', 'multilib', 'multilib-staging', 'multilib-testing']
users: "{{ arch_users.keys() | list }}"
group: users
tags: ["archusers"]
@@ -218,21 +218,21 @@
- name: configure svntogit git user name
command: git config --global user.name = 'svntogit'
become: yes
become: true
become_user: svntogit
register: git_config_username
changed_when: "git_config_username.rc == 0"
tags:
- skip_ansible_lint
- skip_ansible_lint
- name: configure svntogit git user email
command: git config --global user.name = 'svntogit@repos.archlinux.org'
become: yes
become: true
become_user: svntogit
register: git_config_email
changed_when: "git_config_email.rc == 0"
tags:
- skip_ansible_lint
- skip_ansible_lint
- name: template arch-svntogit
copy: src=update-repos.sh dest=/srv/svntogit/update-repos.sh owner=root group=root mode=0755
@@ -245,48 +245,48 @@
with_items:
- community
- packages
become: yes
become: true
become_user: svntogit
tags:
- skip_ansible_lint
- skip_ansible_lint
- name: add svntogit public remotes
command: git remote add public git@github.com:archlinux/svntogit-{{ item }}.git chdir=/srv/svntogit/repos/{{ item }}
with_items:
- community
- packages
become: yes
become: true
become_user: svntogit
ignore_errors: yes
ignore_errors: true
register: git_public_remote
changed_when: "git_public_remote.rc == 0"
tags:
- skip_ansible_lint
- skip_ansible_lint
# The following command also serves as a way to get the data the first time the repo is set up
# The following command also serves as a way to get the data the first time the repo is set up
- name: configure svntogit pull upstream branch
command: git pull public master chdir=/srv/svntogit/repos/{{ item }}
with_items:
- community
- packages
become: yes
become: true
become_user: svntogit
register: git_pull_upstream
changed_when: "git_pull_upstream.rc == 0"
tags:
- skip_ansible_lint
- skip_ansible_lint
- name: configure svntogit push upstream branch
command: git push -u public master chdir=/srv/svntogit/repos/{{ item }}
with_items:
- community
- packages
become: yes
become: true
become_user: svntogit
register: git_push_master
changed_when: "git_push_master.rc == 0"
tags:
- skip_ansible_lint
- skip_ansible_lint
- name: fix svntogit home permissions
file: path="/srv/svntogit" state=directory owner=svntogit group=svntogit mode=0775

View File

@@ -5,7 +5,7 @@
# FIXME: check directory permissions
- name: create dovecot configuration directory
file: path=/etc/dovecot state=directory owner=root group=root mode=0755
file: path=/etc/dovecot state=directory owner=root group=root mode=0755
- name: create dhparam
command: openssl dhparam -out /etc/dovecot/dh.pem 4096 creates=/etc/dovecot/dh.pem
@@ -52,7 +52,7 @@
systemd:
name: "{{ item }}"
state: started
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
with_items:
- dovecot-cleanup.timer

View File

@@ -80,6 +80,6 @@
- name: start and enable service
systemd:
name: "fail2ban.service"
enabled: yes
enabled: true
state: started
daemon-reload: yes
daemon-reload: true

View File

@@ -2,8 +2,8 @@
# NOTE: hack for a systemd bug (restarting firewalld.service fails due to fail2ban.service)
# https://github.com/systemd/systemd/issues/2830
# https://bugzilla.opensuse.org/show_bug.cgi?id=1146856
#- name: restart firewalld
# service: name=firewalld state=restarted
# - name: restart firewalld
# service: name=firewalld state=restarted
- name: stop firewalld
service: name=firewalld state=stopped
listen: restart firewalld

View File

@@ -20,5 +20,5 @@
ansible.posix.firewalld:
service: dhcpv6-client
state: disabled
immediate: yes
immediate: true
when: configure_firewall

View File

@@ -12,7 +12,7 @@
- name: fix home permissions
file: state=directory owner=fluxbb group=fluxbb mode=0750 recurse=yes path="{{ fluxbb_dir }}"
changed_when: False
changed_when: false
- name: create uploads directory
file: state=directory owner=fluxbb group=fluxbb mode=0755 path="{{ fluxbb_dir }}/uploads"

View File

@@ -31,7 +31,7 @@
- name: create setup dir with write permissions
file: state=directory owner="{{ flyspray_user }}" group="{{ flyspray_user }}" path="{{ flyspray_dir }}/setup" mode=755
when: not user_created.changed
when: not user_created.changed
- name: clone flyspray repo
git:

View File

@@ -17,7 +17,7 @@
hostname: "{{ gitlab_domain }}"
container_default_behavior: compatibility
network_mode: host
pull: yes
pull: true
restart_policy: always
env:
# See https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template

View File

@@ -20,7 +20,7 @@
zone: public
permanent: true
state: enabled
immediate: yes
immediate: true
rich_rule: rule family="ipv6" destination not address="fd00::1/80" source address="fd00::/80" masquerade
when: configure_firewall
tags:

View File

@@ -5,13 +5,13 @@
- name: add hedgedoc postgres db
postgresql_db: db=hedgedoc
become: yes
become: true
become_user: postgres
become_method: su
- name: add hedgedoc postgres user
postgresql_user: db=hedgedoc name=hedgedoc password={{ vault_postgres_users.hedgedoc }} encrypted=true
become: yes
become: true
become_user: postgres
become_method: su

View File

@@ -51,11 +51,11 @@
unarchive:
src: /tmp/archlinux-bootstrap-{{ bootstrap_version }}-x86_64.tar.gz
dest: /tmp
remote_src: yes
remote_src: true
creates: /tmp/root.x86_64
- name: copy resolv.conf to bootstrap chroot
copy: remote_src=True src=/etc/resolv.conf dest=/tmp/root.x86_64/etc/resolv.conf owner=root group=root mode=0644
copy: remote_src=true src=/etc/resolv.conf dest=/tmp/root.x86_64/etc/resolv.conf owner=root group=root mode=0644
- name: mount /proc to bootstrap chroot
command: mount --rbind /proc /tmp/root.x86_64/proc creates=/tmp/root.x86_64/proc/uptime # noqa 303
@@ -124,11 +124,11 @@
- name: configure network (static)
template: src=10-static-ethernet.network.j2 dest=/mnt/etc/systemd/network/10-static-ethernet.network owner=root group=root mode=0644
when: not dhcp|default(False)
when: not dhcp|default(false)
- name: configure network (dhcp)
template: src=10-dhcp-ethernet.network.j2 dest=/mnt/etc/systemd/network/10-dhcp-ethernet.network owner=root group=root mode=0644
when: dhcp|default(False)
when: dhcp|default(false)
- name: install hcloud-init
copy: src=hcloud-init dest=/mnt/usr/local/bin/hcloud-init owner=root group=root mode=0755

View File

@@ -28,7 +28,7 @@
password: "{{ vault_keycloak_admin_password }}"
grant_type: password
client_id: admin-cli
ignore_errors: True
ignore_errors: true
register: token
- name: create an admin user
@@ -49,14 +49,14 @@
- name: create postgres keycloak user
postgresql_user: name="{{ keycloak_db_user }}" password="{{ keycloak_db_password }}"
become: yes
become: true
become_user: postgres
become_method: su
no_log: True
no_log: true
- name: create keycloak db
postgresql_db: name=keycloak owner="{{ keycloak_db_user }}"
become: yes
become: true
become_user: postgres
become_method: su

View File

@@ -12,4 +12,3 @@
when: archweb_site
tags:
- nginx

View File

@@ -1,5 +1,5 @@
mariadb_skip_name_resolve: False
mariadb_skip_networking: False
mariadb_skip_name_resolve: false
mariadb_skip_networking: false
mariadb_key_buffer_size: '16M'
mariadb_max_allowed_packet: '16M'
@@ -22,7 +22,7 @@ mariadb_innodb_log_buffer_size: '16M'
mariadb_innodb_flush_log_at_trx_commit: '1'
mariadb_innodb_stats_sample_pages: '32'
mariadb_innodb_thread_concurrency: '8'
mariadb_innodb_file_per_table: False
mariadb_innodb_file_per_table: false
mysql_backup_dir: '/root/backup-mysql'
mysql_backup_defaults: '/root/.backup-my.cnf'

View File

@@ -4,33 +4,33 @@
systemd:
name: synapse
state: restarted
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
- name: restart pantalaimon
systemd:
name: pantalaimon
state: restarted
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
- name: restart mjolnir
systemd:
name: mjolnir
state: restarted
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
- name: restart matrix-appservice-irc
systemd:
name: matrix-appservice-irc
state: restarted
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true
- name: restart turnserver
systemd:
name: turnserver
state: restarted
enabled: yes
daemon_reload: yes
enabled: true
daemon_reload: true

View File

@@ -68,7 +68,7 @@
state: latest
extra_args: '--upgrade-strategy=eager'
virtualenv: '{{ item }}'
become: yes
become: true
become_user: synapse
become_method: sudo
with_items:
@@ -82,7 +82,7 @@
state: latest
extra_args: '--upgrade-strategy=eager'
virtualenv: /var/lib/synapse/venv
become: yes
become: true
become_user: synapse
become_method: sudo
register: synapse_pip
@@ -96,7 +96,7 @@
state: latest
extra_args: '--upgrade-strategy=eager'
virtualenv: /var/lib/synapse/venv-pantalaimon
become: yes
become: true
become_user: synapse
become_method: sudo
notify:
@@ -107,7 +107,7 @@
repo: https://github.com/matrix-org/mjolnir
dest: /var/lib/synapse/mjolnir
version: v0.1.17
become: yes
become: true
become_user: synapse
become_method: sudo
register: mjolnir_git
@@ -117,7 +117,7 @@
- name: install mjolnir
community.general.yarn:
path: /var/lib/synapse/mjolnir
become: yes
become: true
become_user: synapse
become_method: sudo
when: mjolnir_git.changed
@@ -137,7 +137,7 @@
- /var/lib/synapse/mjolnir/synapse_antispam
state: latest
virtualenv: /var/lib/synapse/venv
become: yes
become: true
become_user: synapse
become_method: sudo
when: synapse_pip.changed or mjolnir_git.changed
@@ -149,7 +149,7 @@
repo: https://github.com/matrix-org/matrix-appservice-irc
dest: /var/lib/synapse/matrix-appservice-irc
version: 0.23.0
become: yes
become: true
become_user: synapse
become_method: sudo
register: irc_git
@@ -159,7 +159,7 @@
- name: install matrix-appservice-irc
npm:
path: /var/lib/synapse/matrix-appservice-irc
become: yes
become: true
become_user: synapse
become_method: sudo
when: irc_git.changed
@@ -171,19 +171,19 @@
- name: add synapse postgres db
postgresql_db: db=synapse
become: yes
become: true
become_user: postgres
become_method: su
- name: add synapse postgres user
postgresql_user: db=synapse user=synapse password={{ vault_postgres_users.synapse }}
become: yes
become: true
become_user: postgres
become_method: su
- name: add irc postgres db
postgresql_db: db=irc
become: yes
become: true
become_user: postgres
become_method: su

View File

@@ -3,7 +3,7 @@ patchwork_dir: '/srv/http/patchwork'
patchwork_domain: 'patchwork.archlinux.org'
patchwork_nginx_conf: '/etc/nginx/nginx.d/patchwork.conf'
patchwork_forced_deploy: false
patchwork_admins: ["('Giancarlo Razzolini', 'grazzolini@archlinux.org')", "('Frederik Schwan', "freswa@archlinux.org")"]
patchwork_admins: ["('Giancarlo Razzolini', 'grazzolini@archlinux.org')", "('Frederik Schwan', 'freswa@archlinux.org')"]
patchwork_version: 'v3.0.0'
patchwork_from_email: 'Arch Linux Patchwork <patchwork@patchwork.archlinux.org>'
patchwork_notification_frequency: '10m'
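The old patchwork_admins value nested unescaped double quotes inside a double-quoted scalar, so the YAML parser terminates the string at the second double quote and fails on the remainder — the same "Failed to load or parse file" class of error called out in the commit message. A minimal reproduction (hypothetical keys):

    bad: "('Frederik Schwan', "freswa@archlinux.org")"     # parse error: scalar ends at the second double quote
    good: "('Frederik Schwan', 'freswa@archlinux.org')"    # inner single quotes need no escaping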

View File

@@ -2,7 +2,7 @@
- name: daemon reload
systemd:
daemon-reload: yes
daemon-reload: true
- name: restart patchwork memcached
service: name=patchwork-memcached state=restarted

View File

@@ -128,9 +128,9 @@
- name: start and enable patchwork memcached service and notification timer
systemd:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
with_items:
- patchwork-memcached.service
- patchwork-notification.timer

View File

@@ -1,4 +1,4 @@
---
- name: daemon reload
systemd:
daemon-reload: yes
daemon-reload: true

View File

@@ -1,4 +1,4 @@
---
- name: daemon reload
systemd:
daemon-reload: yes
daemon-reload: true

View File

@@ -33,9 +33,9 @@
- name: start and enable pkgfile and phrikservice
systemd:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
with_items:
- pkgfile-update.timer
- phrik.service

View File

@@ -99,8 +99,8 @@
password: "{{ postfix_relay_password | password_hash('sha512') }}"
shell: /sbin/nologin
update_password: always
home: /home/"{{ inventory_hostname }}" # Set home directory so shadow.service does not fail
create_home: yes
home: /home/"{{ inventory_hostname }}" # Set home directory so shadow.service does not fail
create_home: true
- name: open firewall holes
ansible.posix.firewalld: service={{ item }} permanent=true state=enabled immediate=yes
@@ -111,5 +111,3 @@
when: postfix_smtpd_public and configure_firewall
tags:
- firewall

View File

@@ -2,4 +2,3 @@
- name: reload postfwd
service: name=postfwd state=reloaded

View File

@@ -10,4 +10,3 @@
- name: start and enable postfwd
service: name=postfwd enabled=yes state=started

View File

@@ -20,7 +20,7 @@
when: filesystem == "btrfs"
- name: initialize postgres
become: yes
become: true
become_user: postgres
become_method: su
command: initdb --locale en_US.UTF-8 -E UTF8 -D '/var/lib/postgres/data'
@@ -58,7 +58,7 @@
- name: set postgres user password
postgresql_user: name=postgres password={{ vault_postgres_users.postgres }} encrypted=yes
become: yes
become: true
become_user: postgres
become_method: su

View File

@@ -1,7 +1,7 @@
groups:
- name: node_common
interval: 60s
rules:
- name: node_common
interval: 60s
rules:
- alert: HostHighCpuLoad
expr: 100 - (avg by(instance) (irate(node_cpu_seconds_total{mode="idle",instance!~"build.archlinux.org",instance!~"repro1.pkgbuild.com",instance!~"repro2.pkgbuild.com"}[5m])) * 100) > 80
@@ -93,360 +93,360 @@ groups:
summary: "Host OOM kill detected (instance {{ $labels.instance }})"
description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- name: prometheus
interval: 60s
rules:
- alert: PrometheusTargetMissing
expr: up == 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus target missing (instance {{ $labels.instance }})"
description: "A Prometheus target {{ $value }} has disappeared. An exporter might have crashed."
- alert: PrometheusTooManyRestarts
expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus too many restarts (instance {{ $labels.instance }})"
description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusNotConnectedToAlertmanager
expr: prometheus_notifications_alertmanagers_discovered < 1
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus not connected to alertmanager (instance {{ $labels.instance }})"
description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusRuleEvaluationFailures
expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus rule evaluation failures (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTemplateTextExpansionFailures
expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus template text expansion failures (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusNotificationsBacklog
expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus notifications backlog (instance {{ $labels.instance }})"
description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusAlertmanagerNotificationFailing
expr: rate(alertmanager_notifications_failed_total[1m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus AlertManager notification failing (instance {{ $labels.instance }})"
description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTargetScrapingSlow
expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus target scraping slow (instance {{ $labels.instance }})"
description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusLargeScrape
expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus large scrape (instance {{ $labels.instance }})"
description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbCheckpointCreationFailures
expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbCompactionsFailed
expr: increase(prometheus_tsdb_compactions_failed_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB compactions failed (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbWalCorruptions
expr: increase(prometheus_tsdb_wal_corruptions_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbWalTruncationsFailed
expr: increase(prometheus_tsdb_wal_truncations_failed_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- name: prometheus
interval: 60s
rules:
- alert: PrometheusTargetMissing
expr: up == 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus target missing (instance {{ $labels.instance }})"
description: "A Prometheus target {{ $value }} has disappeared. An exporter might have crashed."
- alert: PrometheusTooManyRestarts
expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus too many restarts (instance {{ $labels.instance }})"
description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusNotConnectedToAlertmanager
expr: prometheus_notifications_alertmanagers_discovered < 1
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus not connected to alertmanager (instance {{ $labels.instance }})"
description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusRuleEvaluationFailures
expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus rule evaluation failures (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTemplateTextExpansionFailures
expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus template text expansion failures (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusNotificationsBacklog
expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus notifications backlog (instance {{ $labels.instance }})"
description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusAlertmanagerNotificationFailing
expr: rate(alertmanager_notifications_failed_total[1m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus AlertManager notification failing (instance {{ $labels.instance }})"
description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTargetScrapingSlow
expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus target scraping slow (instance {{ $labels.instance }})"
description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusLargeScrape
expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
for: 5m
labels:
severity: warning
annotations:
summary: "Prometheus large scrape (instance {{ $labels.instance }})"
description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbCheckpointCreationFailures
expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbCompactionsFailed
expr: increase(prometheus_tsdb_compactions_failed_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB compactions failed (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbWalCorruptions
expr: increase(prometheus_tsdb_wal_corruptions_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: PrometheusTsdbWalTruncationsFailed
expr: increase(prometheus_tsdb_wal_truncations_failed_total[3m]) > 0
for: 5m
labels:
severity: critical
annotations:
summary: "Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})"
description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- name: pacman
interval: 2m
rules:
- alert: pacman_updates_pending
expr: pacman_updates_pending > 50
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} has out of date packages'
summary: '{{ $labels.instance }} has {{ $value }} > 50 out of date packages'
- name: pacman
interval: 2m
rules:
- alert: pacman_updates_pending
expr: pacman_updates_pending > 50
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} has out of date packages'
summary: '{{ $labels.instance }} has {{ $value }} > 50 out of date packages'
- name: btrfs
interval: 2m
rules:
- alert: btrfs_corruption_errs
expr: btrfs_corruption_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs corruption errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_corruption_errs'
- alert: btrfs_write_io_errs
expr: btrfs_write_io_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs write_io errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_write_io_errs'
- alert: btrfs_read_io_errs
expr: btrfs_read_io_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs read_io errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_read_io_errs'
- alert: btrfs_flush_io_errs
expr: btrfs_flush_io_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs flush_io errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_flush_io_errs'
- alert: btrfs_corruption_errs
expr: btrfs_corruption_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs corruption errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_corruption_errs'
- name: btrfs
interval: 2m
rules:
- alert: btrfs_corruption_errs
expr: btrfs_corruption_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs corruption errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_corruption_errs'
- alert: btrfs_write_io_errs
expr: btrfs_write_io_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs write_io errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_write_io_errs'
- alert: btrfs_read_io_errs
expr: btrfs_read_io_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs read_io errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_read_io_errs'
- alert: btrfs_flush_io_errs
expr: btrfs_flush_io_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs flush_io errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_flush_io_errs'
- alert: btrfs_corruption_errs
expr: btrfs_corruption_errs > 1
for: 15m
labels:
severity: warning
annotations:
description: 'host {{ $labels.instance }} btrfs corruption errors'
summary: '{{ $labels.instance }} has {{ $value }} btrfs_corruption_errs'
- name: borg
interval: 60s
rules:
- alert: BorgHetznerMissingBackup
expr: time() - borg_hetzner_last_archive_timestamp > 86400 * 1.2
for: 2m
labels:
severity: critical
annotations:
summary: 'Borg Hetzner missing backup (instance {{ $labels.instance }})'
description: 'Borg has not backed up for more than 24 hours. Last backup made on {{ $value | humanizeTimestamp }}'
- alert: BorgOffsiteMissingBackup
expr: time() - borg_offsite_last_archive_timestamp > 86400 * 1.2
for: 2m
labels:
severity: critical
annotations:
summary: 'Borg Offsite missing backup (instance {{ $labels.instance }})'
description: 'Borg has not backed up for more than 24 hours. Last backup made on {{ $value | humanizeTimestamp }}'
- name: borg
interval: 60s
rules:
- alert: BorgHetznerMissingBackup
expr: time() - borg_hetzner_last_archive_timestamp > 86400 * 1.2
for: 2m
labels:
severity: critical
annotations:
summary: 'Borg Hetzner missing backup (instance {{ $labels.instance }})'
description: 'Borg has not backed up for more than 24 hours. Last backup made on {{ $value | humanizeTimestamp }}'
- alert: BorgOffsiteMissingBackup
expr: time() - borg_offsite_last_archive_timestamp > 86400 * 1.2
for: 2m
labels:
severity: critical
annotations:
summary: 'Borg Offsite missing backup (instance {{ $labels.instance }})'
description: 'Borg has not backed up for more than 24 hours. Last backup made on {{ $value | humanizeTimestamp }}'
- name: systemd_unit
interval: 15s
rules:
- alert: systemd_unit_failed
expr: |
node_systemd_unit_state{state="failed"} > 0
for: 3m
labels:
severity: critical
annotations:
description: 'Instance {{ $labels.instance }}: Service {{ $labels.name }} failed'
summary: 'Systemd unit failed'
- name: systemd_unit
interval: 15s
rules:
- alert: systemd_unit_failed
expr: |
node_systemd_unit_state{state="failed"} > 0
for: 3m
labels:
severity: critical
annotations:
description: 'Instance {{ $labels.instance }}: Service {{ $labels.name }} failed'
summary: 'Systemd unit failed'
- alert: systemd_unit_flapping
expr: |
changes(node_systemd_unit_state{state="active"}[5m]) > 5 or (changes(node_systemd_unit_state{state="active"}[60m]) > 15 unless changes(node_systemd_unit_state{state="active"}[30m]) < 7)
labels:
severity: critical
annotations:
description: 'Instance {{ $labels.instance }}: Service {{ $labels.name }} flapping'
summary: 'Systemd unit flapping'
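Paraphrasing the flapping expression (the PromQL itself is authoritative): it fires either on a short burst — more than 5 active-state transitions within 5 minutes — or on sustained churn — more than 15 transitions within the last hour — unless the most recent 30 minutes were already comparatively quiet, which suppresses the alert for units that have settled down:
  # burst: more than 5 transitions in 5 minutes
  changes(node_systemd_unit_state{state="active"}[5m]) > 5
  or
  # sustained churn, suppressed once the last 30 minutes look calm
  (changes(node_systemd_unit_state{state="active"}[60m]) > 15
    unless changes(node_systemd_unit_state{state="active"}[30m]) < 7)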
- name: gitlab
interval: 15s
rules:
- alert: ServiceDown
expr: avg_over_time(up[5m]) * 100 < 50
annotations:
description: The service {{ $labels.job }} instance {{ $labels.instance }} is
not responding for more than 50% of the time for 5 minutes.
summary: The service {{ $labels.job }} is not responding
- alert: RedisDown
expr: avg_over_time(redis_up[5m]) * 100 < 50
annotations:
description: The Redis service {{ $labels.job }} instance {{ $labels.instance
}} is not responding for more than 50% of the time for 5 minutes.
summary: The Redis service {{ $labels.job }} is not responding
- alert: PostgresDown
expr: avg_over_time(pg_up[5m]) * 100 < 50
annotations:
description: The Postgres service {{ $labels.job }} instance {{ $labels.instance
}} is not responding for more than 50% of the time for 5 minutes.
summary: The Postgres service {{ $labels.job }} is not responding
- alert: UnicornQueueing
expr: avg_over_time(unicorn_queued_connections[30m]) > 1
annotations:
description: Unicorn instance {{ $labels.instance }} is queueing requests with
an average of {{ $value | printf "%.1f" }} over the last 30 minutes.
summary: Unicorn is queueing requests
- alert: PumaQueueing
expr: avg_over_time(puma_queued_connections[30m]) > 1
annotations:
description: Puma instance {{ $labels.instance }} is queueing requests with
an average of {{ $value | printf "%.1f" }} over the last 30 minutes.
summary: Puma is queueing requests
- alert: HighUnicornUtilization
expr: instance:unicorn_utilization:ratio * 100 > 90
for: 60m
annotations:
description: Unicorn instance {{ $labels.instance }} has more than 90% worker utilization ({{ $value | printf "%.1f" }}%) over the last 60 minutes.
summary: Unicorn has high utilization
- alert: HighPumaUtilization
expr: instance:puma_utilization:ratio * 100 > 90
for: 60m
annotations:
description: Puma instance {{ $labels.instance }} has more than 90% thread utilization ({{ $value | printf "%.1f" }}%) over the last 60 minutes.
summary: Puma has high utilization
- alert: SidekiqJobsQueuing
expr: sum by (name) (sidekiq_queue_size) > 0
for: 60m
annotations:
summary: Sidekiq has jobs queued
description: Sidekiq queue {{ $labels.name }} has {{ $value }} jobs queued for 60 minutes.
- alert: HighgRPCResourceExhaustedRate
expr: >
sum without (grpc_code) (
job_grpc:grpc_server_handled_total:rate5m{grpc_code="ResourceExhausted"}
) /
sum without (grpc_code) (
job_grpc:grpc_server_handled_total:rate5m
) * 100 > 1
for: 60m
annotations:
summary: High gRPC ResourceExhausted error rate
description: gRPC is returning more than 1% ({{ $value | printf "%.1f" }}%) ResourceExhausted errors over the last 60 minutes.
- alert: PostgresDatabaseDeadlocks
expr: increase(pg_stat_database_deadlocks[5m]) > 0
annotations:
summary: Postgres database has deadlocks
description: Postgres database {{ $labels.instance }} had {{ $value | printf "%d" }} deadlocks in the last 5 minutes.
- alert: PostgresDatabaseDeadlockCancels
expr: increase(pg_stat_database_deadlocks[5m]) > 0
annotations:
summary: Postgres database has queries canceled due to deadlocks
description: Postgres database {{ $labels.instance }} had {{ $value | printf "%d" }} queries canceled due to deadlocks in the last 5 minutes.
# Low-traffic - < 10 QPS (600 RPM)
- alert: WorkhorseHighErrorRate
expr: >
(
sum without (job, code) (
job_route_method_code:gitlab_workhorse_http_request_duration_seconds_count:rate5m{code=~"5.."}
) /
sum without (job,code) (
job_route_method_code:gitlab_workhorse_http_request_duration_seconds_count:rate5m
) < 10
) * 100 > 50
annotations:
summary: Workhorse has high error rates
description: Workhorse route {{ $labels.route }} method {{ $labels.method }} has more than 50% errors ({{ $value | printf "%.1f" }}%) for the last 60 minutes.
# High-traffic - >= 10 QPS (600 RPM)
- alert: WorkhorseHighErrorRate
expr: >
(
sum without (job, code) (
job_route_method_code:gitlab_workhorse_http_request_duration_seconds_count:rate5m{code=~"5.."}
) /
sum without (job,code) (
job_route_method_code:gitlab_workhorse_http_request_duration_seconds_count:rate5m
) > 10
) * 100 > 10
annotations:
summary: Workhorse has high error rates
description: Workhorse route {{ $labels.route }} method {{ $labels.method }} has more than 10% errors ({{ $value | printf "%.1f" }}%) for the last 60 minutes.
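The two WorkhorseHighErrorRate rules are meant as a traffic split — per the comments, quiet routes (< 10 QPS) tolerate 50% 5xx responses before alerting, while busy routes (>= 10 QPS) alert at 10%. One caveat worth flagging rather than silently fixing: PromQL evaluates `/` before `<`, so as written the `< 10` / `> 10` guards compare the error ratio, not the raw request rate. If the intent is really a QPS split, the guard would sit on the denominator alone — an observation about operator precedence, not a change to the rules:
  # hypothetical QPS-based guard (not what the original expression does):
  sum without (job,code) (
    job_route_method_code:gitlab_workhorse_http_request_duration_seconds_count:rate5m
  ) < 10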
- name: blackbox
interval: 15s
rules:
- alert: BlackboxProbeFailed
expr: probe_success == 0
for: 5m
labels:
severity: critical
annotations:
summary: "Blackbox probe failed (instance {{ $labels.instance }})"
description: "Probe failed\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: BlackboxProbeHttpFailure
expr: probe_http_status_code <= 199 OR probe_http_status_code >= 400
for: 5m
labels:
severity: critical
annotations:
summary: "Blackbox probe HTTP failure (instance {{ $labels.instance }})"
description: "HTTP status code is not 200-399\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- alert: BlackboxSslCertificateWillExpireSoon
expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 25
for: 5m
labels:
severity: critical
annotations:
summary: "Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})"
description: "SSL certificate expires in 25 days\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
- name: rebuilderd
interval: 15m
rules:
- alert: RebuilderdQueueNotEmpty
expr: rebuilderd_queue_length > 2000
for: 24h
labels:
severity: warning
service: rebuilderd
annotations:
summary: "Rebuilderd queue length is not empty {{ $labels.instance }})"
description: "Rebuilderd's queue length is now: {{ $value }}"
- alert: RebuilderdWorkersOffline
expr: rebuilderd_workers < 3
for: 5m
labels:
severity: warning
service: rebuilderd
annotations:
summary: "Rebuilderd workers offline {{ $labels.instance }})"
description: "Not all rebuilder-workers are online, currently {{ $value }} workers are online"
View File
@@ -5,18 +5,18 @@
- name: add quassel postgres db
postgresql_db: db=quassel
become: yes
become: true
become_user: postgres
become_method: su
- name: add quassel postgres user
postgresql_user: db=quassel name=quassel password={{ vault_postgres_users.quassel }} encrypted=true
become: yes
become: true
become_user: postgres
become_method: su
- name: initialize quassel
become: yes
become: true
become_user: quassel
become_method: sudo
expect:
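The become edits in this and the following hunks are the truthy fix from the commit message: YAML 1.1 parsers accept yes/no/on/off as booleans, YAML 1.2 does not, and Ansible happens to understand both — so normalizing to true/false changes nothing at runtime while keeping the files unambiguous for any spec-compliant parser. Side by side:
  become: yes   # YAML 1.1 boolean; flagged by ansible-lint's truthy rule
  become: true  # canonical form, valid in both YAML 1.1 and 1.2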
View File
@@ -1,8 +1,7 @@
# Every entry creates a redirect listening on port 80 and 443 with the following parameters:
# - domain: the domain to listen on
# - to: the redirect target as defined by the nginx return statement
# - type: HTTP status code to use (302 = temporary redirect, 301 = permanent redirect
#)
# - type: HTTP status code to use (302 = temporary redirect, 301 = permanent redirect)
redirects:
- mailman:
domain: mailman.archlinux.org
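For illustration, a hypothetical entry exercising all three parameters — the name, domain, and target are invented, and the nesting mirrors the mailman entry above as far as this hunk shows it:
  - example:
      domain: example.archlinux.org
      to: https://archlinux.org
      type: 301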
View File
@@ -21,7 +21,7 @@
delegate_to: localhost
- name: fill tempfile
copy: content="{{ lookup('template', 'authorized_keys.j2') }}" dest="{{ tempfile.path }}" mode=0644 # noqa 208
copy: content="{{ lookup('template', 'authorized_keys.j2') }}" dest="{{ tempfile.path }}" mode=0644 # noqa 208
delegate_to: localhost
- name: upload authorized_keys file
View File
@@ -102,7 +102,7 @@
- name: start and enable security-tracker timer
systemd:
name: security-tracker-update.timer
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
when: maintenance is not defined
View File
@@ -14,8 +14,8 @@
- name: start and enable syncarchive units
systemd:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
with_items:
- syncarchive.timer
View File
@@ -25,9 +25,9 @@
- name: start and enable syncrepo units
systemd:
name: "{{ item }}"
enabled: yes
enabled: true
state: started
daemon_reload: yes
daemon_reload: true
with_items:
- syncrepo.timer
- rsyncd.socket
View File
@@ -2,7 +2,7 @@
- name: create terraform state db
postgresql_db: db="{{ terraform_db }}"
become: yes
become: true
become_user: postgres
become_method: su
@@ -13,6 +13,6 @@
password: "{{ vault_terraform_db_password }}"
encrypted: true
priv: "ALL"
become: yes
become: true
become_user: postgres
become_method: su
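Assembled from the hunk above — nothing beyond the layout is new — the first task reads like this after the fix:
  - name: create terraform state db
    postgresql_db: db="{{ terraform_db }}"
    become: true
    become_user: postgres
    become_method: su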