author     makefu <makefu@nixos.dev>  2016-01-18 12:50:20 +0100
committer  makefu <makefu@nixos.dev>  2016-01-18 12:50:20 +0100
commit     f4754010336a1d7c876bc6797a44f30e3d4b4ead (patch)
tree       de4dff5340b76d970cc404146e688726e4446e0f /krebs
parent     b86daca11669019d3c2218e623bfb57b5a8033d7 (diff)
parent     de891cf43181d28cbc9526993df4e55022d230da (diff)
Merge branch 'master' of gum:stockholm
Diffstat (limited to 'krebs')
36 files changed, 1312 insertions, 156 deletions
diff --git a/krebs/3modules/Reaktor.nix b/krebs/3modules/Reaktor.nix
index 1ec49b81e..92400139c 100644
--- a/krebs/3modules/Reaktor.nix
+++ b/krebs/3modules/Reaktor.nix
@@ -1,25 +1,15 @@
-{ config, pkgs,lib, ... }:
-
+{ config, lib, pkgs, ... }:
+with lib;
 let
-  inherit (lib)
-    mkIf
-    mkOption
-    types
-    singleton
-    isString
-    optionalString
-    concatStrings
-    escapeShellArg
-    ;
-
   ReaktorConfig = pkgs.writeText "config.py" ''
     ${if (isString cfg.overrideConfig ) then ''
     # Overriden Config
     ${cfg.overrideConfig}
     '' else ""}
     ## Extra Config
+    ${concatStringsSep "\n" (map (plug: plug.config) cfg.plugins)}
     ${cfg.extraConfig}
   '';
   cfg = config.krebs.Reaktor;

@@ -46,7 +36,6 @@ let
     '';
   };

-
   overrideConfig = mkOption {
     default = null;
     type = types.nullOr types.str;
@@ -55,6 +44,9 @@ let
       Reaktor default cfg can be retrieved via `reaktor get-config`
     '';
   };
+  plugins = mkOption {
+    default = [pkgs.ReaktorPlugins.nixos-version];
+  };
   extraConfig = mkOption {
     default = "";
     type = types.string;
@@ -62,6 +54,14 @@ let
       configuration appended to the default or overridden configuration
     '';
   };
+
+  workdir = mkOption {
+    default = "/var/lib/Reaktor";
+    type = types.str;
+    description = ''
+      Reaktor working directory
+    '';
+  };
   extraEnviron = mkOption {
     default = {};
     type = types.attrsOf types.str;
@@ -70,12 +70,17 @@ let
       REAKTOR_HOST
       REAKTOR_PORT
       REAKTOR_STATEDIR
-      REAKTOR_CHANNELS

       debug and nickname can be set separately via the Reaktor api
     '';
   };
-
+  channels = mkOption {
+    default = [ "#krebs" ];
+    type = types.listOf types.str;
+    description = ''
+      Channels the Reaktor should connect to at startup.
+    '';
+  };
   debug = mkOption {
     default = false;
     description = ''
@@ -86,12 +91,11 @@ let

   imp = {
     # for reaktor get-config
-    users.extraUsers = singleton {
+    users.extraUsers = singleton rec {
       name = "Reaktor";
-      # uid = config.ids.uids.Reaktor;
-      uid = 2066439104; #genid Reaktor
+      uid = genid name;
       description = "Reaktor user";
-      home = "/var/lib/Reaktor";
+      home = cfg.workdir;
       createHome = true;
     };

@@ -113,6 +117,9 @@ let
         GIT_SSL_CAINFO = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
         REAKTOR_NICKNAME = cfg.nickname;
         REAKTOR_DEBUG = (if cfg.debug then "True" else "False");
+        REAKTOR_CHANNELS = lib.concatStringsSep "," cfg.channels;
+        state_dir = cfg.workdir;
+
       } // cfg.extraEnviron;
       serviceConfig= {
         ExecStartPre = pkgs.writeScript "Reaktor-init" ''
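The hunks above add plugins, workdir and channels options to krebs.Reaktor and switch the user to genid. A minimal, hedged usage sketch for a host configuration, based only on the option names and defaults visible in this diff (the enable switch and the concrete values are assumptions, not part of the commit):

  # sketch, not part of this commit
  krebs.Reaktor = {
    enable = true;                                     # assumed enable switch, not shown in these hunks
    nickname = "Reaktor|test";                         # illustrative; cfg.nickname is referenced above
    workdir = "/var/lib/Reaktor";                      # new option, shown here with its default
    channels = [ "#krebs" "#retiolum" ];               # new option; default is [ "#krebs" ]
    plugins = [ pkgs.ReaktorPlugins.nixos-version ];   # new option; matches the default above
    extraEnviron.REAKTOR_HOST = "irc.freenode.net";    # illustrative value for an existing option
  };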
diff --git a/krebs/3modules/apt-cacher-ng.nix b/krebs/3modules/apt-cacher-ng.nix
index 75296bafb..371d39b6f 100644
--- a/krebs/3modules/apt-cacher-ng.nix
+++ b/krebs/3modules/apt-cacher-ng.nix
@@ -119,16 +119,14 @@ let

   imp = {
     users.extraUsers.acng = {
-      # uid = config.ids.uids.acng;
-      uid = 897955083; #genid Reaktor
+      uid = genid "acng";
       description = "apt-cacher-ng";
       home = acng-home;
       createHome = false;
     };

     users.extraGroups.acng = {
-      gid = 897955083; #genid Reaktor
-      # gid = config.ids.gids.Reaktor;
+      gid = genid "acng";
     };

     systemd.services.apt-cacher-ng = {
diff --git a/krebs/3modules/backup.nix b/krebs/3modules/backup.nix
new file mode 100644
index 000000000..01bb16a2b
--- /dev/null
+++ b/krebs/3modules/backup.nix
@@ -0,0 +1,286 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  out = {
+    options.krebs.backup = api;
+    config = mkIf cfg.enable imp;
+  };
+
+  cfg = config.krebs.backup;
+
+  api = {
+    enable = mkEnableOption "krebs.backup" // { default = true; };
+    plans = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule ({
+        # TODO enable = mkEnableOption "TODO" // { default = true; };
+        options = {
+          method = mkOption {
+            type = types.enum ["pull" "push"];
+          };
+          name = mkOption {
+            type = types.str;
+          };
+          src = mkOption {
+            type = types.krebs.file-location;
+          };
+          dst = mkOption {
+            type = types.krebs.file-location;
+          };
+          startAt = mkOption {
+            type = types.str;
+          };
+          snapshots = mkOption {
+            type = types.attrsOf (types.submodule {
+              options = {
+                format = mkOption {
+                  type = types.str; # TODO date's +FORMAT
+                };
+                retain = mkOption {
+                  type = types.nullOr types.int;
+                  default = null; # null = retain all snapshots
+                };
+              };
+            });
+          };
+        };
+      }));
+    };
+  };
+
+  imp = {
+    users.groups.backup.gid = genid "backup";
+    users.users = {}
+      // {
+        root.openssh.authorizedKeys.keys =
+          map (plan: plan.dst.host.ssh.pubkey)
+            (filter isPullSrc (attrValues cfg.plans))
+          ++
+          map (plan: plan.src.host.ssh.pubkey)
+            (filter isPushDst (attrValues cfg.plans))
+          ;
+      }
+      ;
+    systemd.services =
+      flip mapAttrs' (filterAttrs (_:isPullDst) cfg.plans) (name: plan: {
+        name = "backup.${name}.pull";
+        value = makePullService plan;
+      })
+      //
+      flip mapAttrs' (filterAttrs (_:isPushSrc) cfg.plans) (name: plan: {
+        name = "backup.${name}.push";
+        value = makePushService plan;
+      })
+      ;
+  };
+
+  isPushSrc = plan:
+    plan.method == "push" &&
+    plan.src.host.name == config.krebs.build.host.name;
+
+  isPullSrc = plan:
+    plan.method == "pull" &&
+    plan.src.host.name == config.krebs.build.host.name;
+
+  isPushDst = plan:
+    plan.method == "push" &&
+    plan.dst.host.name == config.krebs.build.host.name;
+
+  isPullDst = plan:
+    plan.method == "pull" &&
+    plan.dst.host.name == config.krebs.build.host.name;
+
+  # TODO push destination needs this in the dst.user's PATH
+  service-path = [
+    pkgs.coreutils
+    pkgs.gnused
+    pkgs.openssh
+    pkgs.rsync
+    pkgs.utillinux
+  ];
+
+  # TODO if there is plan.user, then use its privkey
+  makePushService = plan: assert isPushSrc plan; {
+    path = service-path;
+    serviceConfig = {
+      ExecStart = push plan;
+      Type = "oneshot";
+    };
+    startAt = plan.startAt;
+  };
+
+  makePullService = plan: assert isPullDst plan; {
+    path = service-path;
+    serviceConfig = {
+      ExecStart = pull plan;
+      Type = "oneshot";
+    };
+    startAt = plan.startAt;
+  };
+
+  push = plan: let
+    # We use writeDashBin and return the absolute path so systemd will produce
+    # nice names in the log, i.e. without the Nix store hash.
+    out = "${main}/bin/${main.name}";
+
+    main = writeDashBin "backup.${plan.name}.push" ''
+      set -efu
+      dst=${shell.escape plan.dst.path}
+
+      mkdir -m 0700 -p "$dst"
+      exec flock -n "$dst" ${critical-section}
+    '';
+
+    critical-section = writeDash "backup.${plan.name}.push.critical-section" ''
+      # TODO check if there is a previous
+      set -efu
+      identity=${shell.escape plan.src.host.ssh.privkey.path}
+      src=${shell.escape plan.src.path}
+      dst_target=${shell.escape "root@${getFQDN plan.dst.host}"}
+      dst_path=${shell.escape plan.dst.path}
+      dst=$dst_target:$dst_path
+
+      # Export NOW so runtime of rsync doesn't influence snapshot naming.
+      export NOW
+      NOW=$(date +%s)
+
+      echo >&2 "update snapshot: current; $src -> $dst"
+      rsync >&2 \
+        -aAXF --delete \
+        -e "ssh -F /dev/null -i $identity" \
+        --rsync-path ${shell.escape
+          "mkdir -m 0700 -p ${shell.escape plan.dst.path} && rsync"} \
+        --link-dest="$dst_path/current" \
+        "$src/" \
+        "$dst/.partial"
+
+      exec ssh -F /dev/null \
+        -i "$identity" \
+        "$dst_target" \
+        -T \
+        env NOW="$NOW" /bin/sh < ${remote-snapshot}
+      EOF
+    '';
+
+    remote-snapshot = writeDash "backup.${plan.name}.push.remote-snapshot" ''
+      set -efu
+      dst=${shell.escape plan.dst.path}
+
+      if test -e "$dst/current"; then
+        mv "$dst/current" "$dst/.previous"
+      fi
+      mv "$dst/.partial" "$dst/current"
+      rm -fR "$dst/.previous"
+      echo >&2
+
+      (${(take-snapshots plan).text})
+    '';
+
+  in out;
+
+  # TODO admit plan.dst.user and its ssh identity
+  pull = plan: let
+    # We use writeDashBin and return the absolute path so systemd will produce
+    # nice names in the log, i.e. without the Nix store hash.
+    out = "${main}/bin/${main.name}";
+
+    main = writeDashBin "backup.${plan.name}.pull" ''
+      set -efu
+      dst=${shell.escape plan.dst.path}
+
+      mkdir -m 0700 -p "$dst"
+      exec flock -n "$dst" ${critical-section}
+    '';
+
+    critical-section = writeDash "backup.${plan.name}.pull.critical-section" ''
+      # TODO check if there is a previous
+      set -efu
+      identity=${shell.escape plan.dst.host.ssh.privkey.path}
+      src=${shell.escape "root@${getFQDN plan.src.host}:${plan.src.path}"}
+      dst=${shell.escape plan.dst.path}
+
+      # Export NOW so runtime of rsync doesn't influence snapshot naming.
+      export NOW
+      NOW=$(date +%s)
+
+      echo >&2 "update snapshot: current; $dst <- $src"
+      mkdir -m 0700 -p ${shell.escape plan.dst.path}
+      rsync >&2 \
+        -aAXF --delete \
+        -e "ssh -F /dev/null -i $identity" \
+        --link-dest="$dst/current" \
+        "$src/" \
+        "$dst/.partial"
+      mv "$dst/current" "$dst/.previous"
+      mv "$dst/.partial" "$dst/current"
+      rm -fR "$dst/.previous"
+      echo >&2
+
+      exec ${take-snapshots plan}
+    '';
+  in out;
+
+  take-snapshots = plan: writeDash "backup.${plan.name}.take-snapshots" ''
+    set -efu
+    NOW=''${NOW-$(date +%s)}
+    dst=${shell.escape plan.dst.path}
+
+    snapshot() {(
+      : $ns $format $retain
+      name=$(date --date="@$NOW" +"$format")
+      if ! test -e "$dst/$ns/$name"; then
+        echo >&2 "create snapshot: $ns/$name"
+        mkdir -m 0700 -p "$dst/$ns"
+        rsync >&2 \
+          -aAXF --delete \
+          --link-dest="$dst/current" \
+          "$dst/current/" \
+          "$dst/$ns/.partial.$name"
+        mv "$dst/$ns/.partial.$name" "$dst/$ns/$name"
+        echo >&2
+      fi
+      case $retain in
+        ([0-9]*)
+          delete_from=$(($retain + 1))
+          ls -r "$dst/$ns" \
+            | sed -n "$delete_from,\$p" \
+            | while read old_name; do
+                echo >&2 "delete snapshot: $ns/$old_name"
+                rm -fR "$dst/$ns/$old_name"
+              done
+          ;;
+        (ALL)
+          :
+          ;;
+      esac
+    )}
+
+    ${concatStringsSep "\n" (mapAttrsToList (ns: { format, retain ? null, ... }:
+      toString (map shell.escape [
+        "ns=${ns}"
+        "format=${format}"
+        "retain=${if retain == null then "ALL" else toString retain}"
+        "snapshot"
+      ]))
+      plan.snapshots)}
+  '';
+
+  # TODO getFQDN: admit hosts in other domains
+  getFQDN = host: "${host.name}.${config.krebs.search-domain}";
+
+  writeDash = name: text: pkgs.writeScript name ''
+    #! ${pkgs.dash}/bin/dash
+    ${text}
+  '';
+
+  writeDashBin = name: text: pkgs.writeTextFile {
+    executable = true;
+    destination = "/bin/${name}";
+    name = name;
+    text = ''
+      #! ${pkgs.dash}/bin/dash
+      ${text}
+    '';
+  };
+
+in out
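backup.nix above introduces krebs.backup.plans together with per-plan push/pull services and snapshot rotation. A hedged sketch of one plan, using only the options from the api section; the hosts, paths and schedule are illustrative, and src/dst are assumed to take the host/path attributes that the scripts access as plan.src.host and plan.src.path:

  # sketch, not part of this commit
  krebs.backup.plans.test-home = {
    method = "push";                       # or "pull"
    name = "test-home";
    src = { host = config.krebs.hosts.tsp; path = "/home"; };         # illustrative host/path
    dst = { host = config.krebs.hosts.wry; path = "/bku/tsp-home"; }; # illustrative host/path
    startAt = "05:00";
    snapshots = {
      daily   = { format = "%Y-%m-%d"; retain = 7; };
      weekly  = { format = "%YW%W"; retain = 4; };
      monthly = { format = "%Y-%m"; };     # retain defaults to null = keep all snapshots
    };
  };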
diff --git a/krebs/3modules/bepasty-server.nix b/krebs/3modules/bepasty-server.nix
index c99c3d11a..e74841205 100644
--- a/krebs/3modules/bepasty-server.nix
+++ b/krebs/3modules/bepasty-server.nix
@@ -130,12 +130,12 @@ let
     ) cfg.servers;

     users.extraUsers.bepasty = {
-      uid = 2796546855; #genid bepasty
+      uid = genid "bepasty";
       group = "bepasty";
       home = "/var/lib/bepasty-server";
     };

     users.extraGroups.bepasty = {
-      gid = 2796546855; #genid bepasty
+      gid = genid "bepasty";
     };
   };
diff --git a/krebs/3modules/buildbot/master.nix b/krebs/3modules/buildbot/master.nix
new file mode 100644
index 000000000..74385a433
--- /dev/null
+++ b/krebs/3modules/buildbot/master.nix
@@ -0,0 +1,385 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  buildbot = pkgs.buildbot;
+  buildbot-master-config = pkgs.writeText "buildbot-master.cfg" ''
+    # -*- python -*-
+    from buildbot.plugins import *
+    import re
+    import json
+    c = BuildmasterConfig = {}
+
+    c['slaves'] = []
+    slaves = json.loads('${builtins.toJSON cfg.slaves}')
+    slavenames = [ s for s in slaves ]
+    for k,v in slaves.items():
+      c['slaves'].append(buildslave.BuildSlave(k, v))
+
+    # TODO: configure protocols?
+    c['protocols'] = {'pb': {'port': 9989}}
+
+    ####### Build Inputs
+    c['change_source'] = cs = []
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Change_Source: Begin of ${n}
+        ${v}
+        #### Change_Source: End of ${n}
+        '') cfg.change_source )}
+
+    ####### Build Scheduler
+    c['schedulers'] = sched = []
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Schedulers: Begin of ${n}
+        ${v}
+        #### Schedulers: End of ${n}
+        '') cfg.scheduler )}
+
+    ###### Builder
+    c['builders'] = bu = []
+
+    # Builder Pre: Begin
+    ${cfg.builder_pre}
+    # Builder Pre: End
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Builder: Begin of ${n}
+        ${v}
+        #### Builder: End of ${n}
+        '') cfg.builder )}
+
+
+    ####### Status
+    c['status'] = st = []
+
+    # If you want to configure this url, override with extraConfig
+    c['buildbotURL'] = "http://${config.networking.hostName}:${toString cfg.web.port}/"
+
+    ${optionalString (cfg.web.enable) ''
+      from buildbot.status import html
+      from buildbot.status.web import authz, auth
+      authz_cfg=authz.Authz(
+        auth=auth.BasicAuth([ ("${cfg.web.username}","${cfg.web.password}") ]),
+        # TODO: configure harder
+        gracefulShutdown = False,
+        forceBuild = 'auth',
+        forceAllBuilds = 'auth',
+        pingBuilder = False,
+        stopBuild = 'auth',
+        stopAllBuilds = 'auth',
+        cancelPendingBuild = 'auth'
+      )
+      # TODO: configure krebs.nginx
+      st.append(html.WebStatus(http_port=${toString cfg.web.port}, authz=authz_cfg))
+    ''}
+
+    ${optionalString (cfg.irc.enable) ''
+      from buildbot.status import words
+      irc = words.IRC("${cfg.irc.server}", "${cfg.irc.nick}",
+                      channels=${builtins.toJSON cfg.irc.channels},
+                      notify_events={
+                        'success': 1,
+                        'failure': 1,
+                        'exception': 1,
+                        'successToFailure': 1,
+                        'failureToSuccess': 1,
+                      }${optionalString cfg.irc.allowForce ",allowForce=True"})
+      c['status'].append(irc)
+    ''}
+
+    ${ concatStringsSep "\n"
+      (mapAttrsToList (n: v: ''
+        #### Status: Begin of ${n}
+        ${v}
+        #### Status: End of ${n}
+        '') cfg.status )}
+
+    ####### PROJECT IDENTITY
+    c['title'] = "${cfg.title}"
+    c['titleURL'] = "http://krebsco.de"
+
+
+    ####### DB URL
+    # TODO: configure
+    c['db'] = {
+      'db_url' : "sqlite:///state.sqlite",
+    }
+    ${cfg.extraConfig}
+  '';
+
+  cfg = config.krebs.buildbot.master;
+
+  api = {
+    enable = mkEnableOption "Buildbot Master";
+    title = mkOption {
+      default = "Buildbot CI";
+      type = types.str;
+      description = ''
+        Title of the Buildbot Installation
+      '';
+    };
+    workDir = mkOption {
+      default = "/var/lib/buildbot/master";
+      type = types.str;
+      description = ''
+        Path to build bot master directory.
+        Will be created on startup.
+      '';
+    };
+
+    secrets = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      example = [ "cac.json" ];
+      description = ''
+        List of all the secrets in <secrets> which should be copied into the
+        buildbot master directory.
+      '';
+    };
+
+    slaves = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      description = ''
+        Attrset of slavenames with their passwords
+        slavename = slavepassword
+      '';
+    };
+
+    change_source = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      example = {
+        stockholm = ''
+          cs.append(changes.GitPoller(
+              'http://cgit.gum/stockholm',
+              workdir='stockholm-poller', branch='master',
+              project='stockholm',
+              pollinterval=120))
+        '';
+      };
+      description = ''
+        Attrset of all the change_sources which should be configured.
+        It will be directly included into the master configuration.
+
+        At the end an change object should be appended to <literal>cs</literal>
+      '';
+    };
+
+    scheduler = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      example = {
+        force-scheduler = ''
+          sched.append(schedulers.ForceScheduler(
+              name="force",
+              builderNames=["full-tests"]))
+        '';
+      };
+      description = ''
+        Attrset of all the schedulers which should be configured.
+        It will be directly included into the master configuration.
+
+        At the end an change object should be appended to <literal>sched</literal>
+      '';
+    };
+
+    builder_pre = mkOption {
+      default = "";
+      type = types.lines;
+      example = ''
+        grab_repo = steps.Git(repourl=stockholm_repo, mode='incremental')
+      '';
+      description = ''
+        some code before the builders are being assembled.
+        can be used to define functions used by multiple builders
+      '';
+    };
+
+    builder = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      example = {
+        fast-test = ''
+        '';
+      };
+      description = ''
+        Attrset of all the builder which should be configured.
+        It will be directly included into the master configuration.
+
+        At the end an change object should be appended to <literal>bu</literal>
+      '';
+    };
+
+    status = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      description = ''
+        Attrset of all the extra status which should be configured.
+        It will be directly included into the master configuration.
+
+        At the end an change object should be appended to <literal>st</literal>
+
+        Right now IRC and Web status can be configured by setting
+        <literal>buildbot.master.irc.enable</literal> and
+        <literal>buildbot.master.web.enable</literal>
+      '';
+    };
+
+    # Configurable Stati
+    web = mkOption {
+      default = {};
+      type = types.submodule ({ config2, ... }: {
+        options = {
+          enable = mkEnableOption "Buildbot Master Web Status";
+          username = mkOption {
+            default = "krebs";
+            type = types.str;
+            description = ''
+              username for web authentication
+            '';
+          };
+          hostname = mkOption {
+            default = config.networking.hostName;
+            type = types.str;
+            description = ''
+              web interface Hostname
+            '';
+          };
+          password = mkOption {
+            default = "bob";
+            type = types.str;
+            description = ''
+              password for web authentication
+            '';
+          };
+          port = mkOption {
+            default = 8010;
+            type = types.int;
+            description = ''
+              port for buildbot web status
+            '';
+          };
+        };
+      });
+    };
+
+    irc = mkOption {
+      default = {};
+      type = types.submodule ({ config, ... }: {
+        options = {
+          enable = mkEnableOption "Buildbot Master IRC Status";
+          channels = mkOption {
+            default = [ "nix-buildbot-meetup" ];
+            type = with types; listOf str;
+            description = ''
+              irc channels the bot should connect to
+            '';
+          };
+          allowForce = mkOption {
+            default = false;
+            type = types.bool;
+            description = ''
+              Determines if builds can be forced via IRC
+            '';
+          };
+          nick = mkOption {
+            default = "nix-buildbot";
+            type = types.str;
+            description = ''
+              nickname for IRC
+            '';
+          };
+          server = mkOption {
+            default = "irc.freenode.net";
+            type = types.str;
+            description = ''
+              Buildbot Status IRC Server to connect to
+            '';
+          };
+        };
+      });
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      description = ''
+        extra config appended to the generated master.cfg
+      '';
+    };
+  };
+
+  imp = {
+
+    users.extraUsers.buildbotMaster = {
+      uid = genid "buildbotMaster";
+      description = "Buildbot Master";
+      home = cfg.workDir;
+      createHome = false;
+    };
+
+    users.extraGroups.buildbotMaster = {
+      gid = 672626386;
+    };
+
+    systemd.services.buildbotMaster = {
+      description = "Buildbot Master";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      # TODO: add extra dependencies to master like svn and cvs
+      path = [ pkgs.git ];
+      environment = {
+        SSL_CERT_FILE = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
+      };
+      serviceConfig = let
+        workdir="${lib.shell.escape cfg.workDir}";
+        secretsdir="${lib.shell.escape (toString <secrets>)}";
+      in {
+        PermissionsStartOnly = true;
+        Type = "forking";
+        PIDFile = "${workdir}/twistd.pid";
+        # TODO: maybe also prepare buildbot.tac?
+        ExecStartPre = pkgs.writeScript "buildbot-master-init" ''
+          #!/bin/sh
+          set -efux
+          if [ ! -e ${workdir} ];then
+            mkdir -p ${workdir}
+            ${buildbot}/bin/buildbot create-master -r -l 10 -f ${workdir}
+          fi
+          # always override the master.cfg
+          cp ${buildbot-master-config} ${workdir}/master.cfg
+
+          # copy secrets
+          ${ concatMapStringsSep "\n"
+             (f: "cp ${secretsdir}/${f} ${workdir}/${f}" ) cfg.secrets }
+          # sanity
+          ${buildbot}/bin/buildbot checkconfig ${workdir}
+
+          # TODO: maybe upgrade? not sure about this
+          # normally we should write buildbot.tac by our own
+          # ${buildbot}/bin/buildbot upgrade-master ${workdir}
+
+          chmod 700 -R ${workdir}
+          chown buildbotMaster:buildbotMaster -R ${workdir}
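The api section above defines the options of krebs.buildbot.master; the diff is cut off inside the ExecStartPre script. A hedged configuration sketch using only those options (slave name, password, repository URL and the builder body are illustrative, and the Python fragments rely on the names cs, sched, bu and slavenames that the generated master.cfg defines above):

  # sketch, not part of this commit
  krebs.buildbot.master = {
    enable = true;
    title = "krebs-ci";
    slaves.testslave = "krebspass";            # slavename = slavepassword
    change_source.stockholm = ''
      cs.append(changes.GitPoller(
          'http://cgit.gum/stockholm',
          workdir='stockholm-poller', branch='master',
          project='stockholm', pollinterval=120))
    '';
    scheduler.force-scheduler = ''
      sched.append(schedulers.ForceScheduler(
          name="force",
          builderNames=["fast-tests"]))
    '';
    builder_pre = ''
      grab_repo = steps.Git(repourl='http://cgit.gum/stockholm', mode='incremental')
    '';
    builder.fast-tests = ''
      f = util.BuildFactory()
      f.addStep(grab_repo)
      bu.append(util.BuilderConfig(name="fast-tests",
                                   slavenames=slavenames,
                                   factory=f))
    '';
    web.enable = true;                         # WebStatus on port 8010 by default
    irc = {
      enable = true;
      channels = [ "krebs" ];                  # illustrative; default is [ "nix-buildbot-meetup" ]
      allowForce = true;
    };
  };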