{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\"\"\")\n\nclass HashBang(Grammar):\n\tgrammar = (L('#!'), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\treturn \"\"\n\ndef strip_indent(s):\n\ts = s.replace(\"\\t\", \" \")\n\tlines = s.split(\"\\n\")\n\ttry:\n\t\tmin_indent = min(len(re.match(r\"\\s*\", line).group(0)) for line in lines if len(line) > 0)\n\texcept ValueError:\n\t\t# No non-empty lines.\n\t\tmin_indent = 0\n\tlines = [line[min_indent:] for line in lines]\n\treturn \"\\n\".join(lines)\n\nclass Comment(Grammar):\n\tgrammar = ONE_OR_MORE(ZERO_OR_MORE(SPACE), L('#'), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\tif self.string.replace(\"#\", \"\").strip() == \"\":\n\t\t\treturn \"\\n\"\n\t\tlines = [x[2].string for x in self[0]]\n\t\tcontent = \"\\n\".join(lines)\n\t\tcontent = strip_indent(content)\n\t\treturn markdown.markdown(content, output_format=\"html4\") + \"\\n\\n\"\n\nFILENAME = WORD('a-z0-9-/.')\n\nclass Source(Grammar):\n\tgrammar = ((L('.') | L('source')), L(' '), FILENAME, Comment | EOL)\n\tdef filename(self):\n\t\treturn self[2].string.strip()\n\tdef value(self):\n\t\treturn BashScript.parse(self.filename())\n\nclass CatEOF(Grammar):\n\tgrammar = (ZERO_OR_MORE(SPACE), L('cat '), L('>') | L('>>'), L(' '), ANY_EXCEPT(WHITESPACE), L(\" <<\"), OPTIONAL(SPACE), L(\"EOF\"), EOL, REPEAT(ANY, greedy=False), EOL, L(\"EOF\"), EOL)\n\tdef value(self):\n\t\tcontent = self[9].string\n\t\tcontent = re.sub(r\"\\\\([$])\", r\"\\1\", content) # un-escape bash-escaped characters\n\t\treturn \"
%s (%s)
%s
\\n\" \\\n\t\t\t% (self[4].string,\n\t\t\t \"overwrite\" if \">>\" not in self[2].string else \"append to\",\n\t\t\t cgi.escape(content))\n\nclass HideOutput(Grammar):\n\tgrammar = (L(\"hide_output \"), REF(\"BashElement\"))\n\tdef value(self):\n\t\treturn self[1].value()\n\nclass EchoLine(Grammar):\n\tgrammar = (OPTIONAL(SPACE), L(\"echo \"), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\tif \"|\" in self.string or \">\" in self.string:\n\t\t\treturn \"
\" + recode_bash(self.string.strip()) + \"
\\n\"\n\t\treturn \"\"\n\nclass EditConf(Grammar):\n\tgrammar = (\n\t\tL('tools/editconf.py '),\n\t\tFILENAME,\n\t\tSPACE,\n\t\tOPTIONAL((LIST_OF(\n\t\t\tL(\"-w\") | L(\"-s\") | L(\"-c ;\"),\n\t\t\tsep=SPACE,\n\t\t), SPACE)),\n\t\tREST_OF_LINE,\n\t\tOPTIONAL(SPACE),\n\t\tEOL\n\t\t)\n\tdef value(self):\n\t\tconffile = self[1]\n\t\toptions = []\n\t\teq = \"=\"\n\t\tif self[3] and \"-s\" in self[3].string: eq = \" \"\n\t\tfor opt in re.split(\"\\s+\", self[4].string):\n\t\t\tk, v = opt.split(\"=\", 1)\n\t\t\tv = re.sub(r\"\\n+\", \"\", fixup_tokens(v)) # not sure why newlines are getting doubled\n\t\t\toptions.append(\"%s%s%s\" % (k, eq, v))\n\t\treturn \"
\" + self[1].string + \" (change settings)
\" + \"\\n\".join(cgi.escape(s) for s in options) + \"
\\n\"\n\nclass CaptureOutput(Grammar):\n\tgrammar = OPTIONAL(SPACE), WORD(\"A-Za-z_\"), L('=$('), REST_OF_LINE, L(\")\"), OPTIONAL(L(';')), EOL\n\tdef value(self):\n\t\tcmd = self[3].string\n\t\tcmd = cmd.replace(\"; \", \"\\n\")\n\t\treturn \"
$\" + self[1].string + \"=
\" + cgi.escape(cmd) + \"
\\n\"\n\nclass SedReplace(Grammar):\n\tgrammar = OPTIONAL(SPACE), L('sed -i \"s/'), OPTIONAL(L('^')), ONE_OR_MORE(WORD(\"-A-Za-z0-9 #=\\\\{};.*$_!()\")), L('/'), ONE_OR_MORE(WORD(\"-A-Za-z0-9 #=\\\\{};.*$_!()\")), L('/\"'), SPACE, FILENAME, EOL\n\tdef value(self):\n\t\treturn \"
edit
\" + self[8].string + \"

replace

\" + cgi.escape(self[3].string.replace(\".*\", \". . .\")) + \"

with

\" + cgi.escape(self[5].string.replace(\"\\\\n\", \"\\n\").replace(\"\\\\t\", \"\\t\")) + \"
\\n\"\n\nclass EchoPipe(Grammar):\n\tgrammar = OPTIONAL(SPACE), L(\"echo \"), REST_OF_LINE, L(' | '), REST_OF_LINE, EOL\n\tdef value(self):\n\t\ttext = \" \".join(\"\\\"%s\\\"\" % s for s in self[2].string.split(\" \"))\n\t\treturn \"
echo \" + recode_bash(text) + \" \\
| \" + recode_bash(self[4].string) + \"
\\n\"\n\ndef shell_line(bash):\n\treturn \"
\" + recode_bash(bash.strip()) + \"
\\n\"\n\nclass AptGet(Grammar):\n\tgrammar = (ZERO_OR_MORE(SPACE), L(\"apt_install \"), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\treturn shell_line(\"apt-get install -y \" + re.sub(r\"\\s+\", \" \", self[2].string))\nclass UfwAllow(Grammar):\n\tgrammar = (ZERO_OR_MORE(SPACE), L(\"ufw_allow \"), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\treturn shell_line(\"ufw allow \" + self[2].string)\nclass UfwLimit(Grammar):\n\tgrammar = (ZERO_OR_MORE(SPACE), L(\"ufw_limit \"), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\treturn shell_line(\"ufw limit \" + self[2].string)\nclass RestartService(Grammar):\n\tgrammar = (ZERO_OR_MORE(SPACE), L(\"restart_service \"), REST_OF_LINE, EOL)\n\tdef value(self):\n\t\treturn shell_line(\"service \" + self[2].string + \" restart\")\n\nclass OtherLine(Grammar):\n\tgrammar = (REST_OF_LINE, EOL)\n\tdef value(self):\n\t\tif self.string.strip() == \"\": return \"\"\n\t\tif \"source setup/functions.sh\" in self.string: return \"\"\n\t\tif \"source /etc/mailinabox.conf\" in self.string: return \"\"\n\t\treturn \"
\" + recode_bash(self.string.strip()) + \"
\\n\"\n\nclass BashElement(Grammar):\n\tgrammar = Comment | CatEOF | EchoPipe | EchoLine | HideOutput | EditConf | SedReplace | AptGet | UfwAllow | UfwLimit | RestartService | OtherLine\n\tdef value(self):\n\t\treturn self[0].value()\n\n# Make some special characters to private use Unicode code points.\nbash_special_characters1 = {\n\t\"\\n\": \"\\uE000\",\n\t\" \": \"\\uE001\",\n}\nbash_special_characters2 = {\n\t\"$\": \"\\uE010\",\n}\nbash_escapes = {\n\t\"n\": \"\\uE020\",\n\t\"t\": \"\\uE021\",\n}\n\ndef quasitokenize(bashscript):\n\t# Make a parse of bash easier by making the tokenization easy.\n\tnewscript = \"\"\n\tquote_mode = None\n\tescape_next = False\n\tline_comment = False\n\tsubshell = 0\n\tfor c in bashscript:\n\t\tif line_comment:\n\t\t\t# We're in a comment until the end of the line.\n\t\t\tnewscript += c\n\t\t\tif c == '\\n':\n\t\t\t\tline_comment = False\n\t\telif escape_next:\n\t\t\t# Previous character was a \\. Normally the next character\n\t\t\t# comes through literally, but escaped newlines are line\n\t\t\t# continuations and some escapes are for special characters\n\t\t\t# which we'll recode and then turn back into escapes later.\n\t\t\tif c == \"\\n\":\n\t\t\t\tc = \" \"\n\t\t\telif c in bash_escapes:\n\t\t\t\tc = bash_escapes[c]\n\t\t\tnewscript += c\n\t\t\tescape_next = False\n\t\telif c == \"\\\\\":\n\t\t\t# Escaping next character.\n\t\t\tescape_next = True\n\t\telif quote_mode is None and c in ('\"', \"'\"):\n\t\t\t# Starting a quoted word.\n\t\t\tquote_mode = c\n\t\telif c == quote_mode:\n\t\t\t# Ending a quoted word.\n\t\t\tquote_mode = None\n\t\telif quote_mode is not None and quote_mode != \"EOF\" and c in bash_special_characters1:\n\t\t\t# Replace special tokens within quoted words so that they\n\t\t\t# don't interfere with tokenization later.\n\t\t\tnewscript += bash_special_characters1[c]\n\t\telif quote_mode is None and c == '#':\n\t\t\t# Start of a line comment.\n\t\t\tnewscript += c\n\t\t\tline_comment = True\n\t\telif quote_mode is None and c == ';' and subshell == 0:\n\t\t\t# End of a statement.\n\t\t\tnewscript += \"\\n\"\n\t\telif quote_mode is None and c == '(':\n\t\t\t# Start of a subshell.\n\t\t\tnewscript += c\n\t\t\tsubshell += 1\n\t\telif quote_mode is None and c == ')':\n\t\t\t# End of a subshell.\n\t\t\tnewscript += c\n\t\t\tsubshell -= 1\n\t\telif quote_mode is None and c == '\\t':\n\t\t\t# Make these just spaces.\n\t\t\tif newscript[-1] != \" \":\n\t\t\t\tnewscript += \" \"\n\t\telif quote_mode is None and c == ' ':\n\t\t\t# Collapse consecutive spaces.\n\t\t\tif newscript[-1] != \" \":\n\t\t\t\tnewscript += \" \"\n\t\telif c in bash_special_characters2:\n\t\t\tnewscript += bash_special_characters2[c]\n\t\telse:\n\t\t\t# All other characters.\n\t\t\tnewscript += c\n\n\t\t# \"<< EOF\" escaping.\n\t\tif quote_mode is None and re.search(\"<<\\s*EOF\\n$\", newscript):\n\t\t\tquote_mode = \"EOF\"\n\t\telif quote_mode == \"EOF\" and re.search(\"\\nEOF\\n$\", newscript):\n\t\t\tquote_mode = None\n\n\treturn newscript\n\ndef recode_bash(s):\n\tdef requote(tok):\n\t\ttok = tok.replace(\"\\\\\", \"\\\\\\\\\")\n\t\tfor c in bash_special_characters2:\n\t\t\ttok = tok.replace(c, \"\\\\\" + c)\n\t\ttok = fixup_tokens(tok)\n\t\tif \" \" in tok or '\"' in tok:\n\t\t\ttok = tok.replace(\"\\\"\", \"\\\\\\\"\")\n\t\t\ttok = '\"' + tok +'\"'\n\t\telse:\n\t\t\ttok = tok.replace(\"'\", \"\\\\'\")\n\t\treturn tok\n\treturn cgi.escape(\" \".join(requote(tok) for tok in s.split(\" \")))\n\ndef fixup_tokens(s):\n\tfor c, enc in bash_special_characters1.items():\n\t\ts = s.replace(enc, c)\n\tfor c, enc in bash_special_characters2.items():\n\t\ts = s.replace(enc, c)\n\tfor esc, c in bash_escapes.items():\n\t\ts = s.replace(c, \"\\\\\" + esc)\n\treturn s\n\nclass BashScript(Grammar):\n\tgrammar = (OPTIONAL(HashBang), REPEAT(BashElement))\n\tdef value(self):\n\t\treturn [line.value() for line in self[1]]\n\n\t@staticmethod\n\tdef parse(fn):\n\t\tif fn in (\"setup/functions.sh\", \"/etc/mailinabox.conf\"): return \"\"\n\t\twith open(fn, \"r\") as f:\n\t\t\tstring = f.read()\n\n\t\t# tokenize\n\t\tstring = re.sub(\".* #NODOC\\n\", \"\", string)\n\t\tstring = re.sub(\"\\n\\s*if .*then.*|\\n\\s*fi|\\n\\s*else|\\n\\s*elif .*\", \"\", string)\n\t\tstring = quasitokenize(string)\n\t\tstring = re.sub(\"hide_output \", \"\", string)\n\n\t\tparser = BashScript.parser()\n\t\tresult = parser.parse_string(string)\n\n\t\tv = \"
view the bash source for the following section at %s
\\n\" \\\n\t\t\t % (\"https://github.com/mail-in-a-box/mailinabox/tree/master/\" + fn, fn)\n\n\t\tmode = 0\n\t\tfor item in result.value():\n\t\t\tif item.strip() == \"\":\n\t\t\t\tpass\n\t\t\telif item.startswith(\"\\n\" # col\n\t\t\t\t\tv += \"
\\n\" # row\n\t\t\t\t\tmode = 0\n\t\t\t\t\tclz = \"contd\"\n\t\t\t\tif mode == 0:\n\t\t\t\t\tv += \"
\\n\" % clz\n\t\t\t\t\tv += \"
\\n\"\n\t\t\t\tv += item\n\t\t\t\tmode = 1\n\t\t\telif item.startswith(\"\\n\" # col\n\t\t\t\t\tv += \"
\\n\" # row\n\t\t\t\tv += \"
\\n\"\n\t\t\t\tv += \"
\\n\"\n\t\t\t\tv += item\n\t\t\t\tv += \"
\\n\" # col\n\t\t\t\tv += \"
\\n\"\n\t\t\t\tv += \"
\\n\" # row\n\t\t\t\tmode = 0\n\t\t\telse:\n\t\t\t\tif mode == 0:\n\t\t\t\t\tv += \"
\\n\"\n\t\t\t\t\tv += \"
\\n\"\n\t\t\t\telif mode == 1:\n\t\t\t\t\tv += \"
\\n\"\n\t\t\t\t\tv += \"
\\n\"\n\t\t\t\tmode = 2\n\t\t\t\tv += item\n\n\t\tv += \"
\\n\" # col\n\t\tv += \"
\\n\" # row\n\n\t\tv = fixup_tokens(v)\n\n\t\tv = v.replace(\"\\n
\", \"\")\n\t\tv = re.sub(\"
([\\w\\W]*?)
\", lambda m : \"
\" + strip_indent(m.group(1)) + \"
\", v)\n\n\t\tv = re.sub(r\"(\\$?)PRIMARY_HOSTNAME\", r\"box.yourdomain.com\", v)\n\t\tv = re.sub(r\"\\$STORAGE_ROOT\", r\"$STORE\", v)\n\t\tv = v.replace(\"`pwd`\", \"/path/to/mailinabox\")\n\n\t\treturn v\n\ndef wrap_lines(text, cols=60):\n\tret = \"\"\n\twords = re.split(\"(\\s+)\", text)\n\tlinelen = 0\n\tfor w in words:\n\t\tif linelen + len(w) > cols-1:\n\t\t\tret += \" \\\\\\n\"\n\t\t\tret += \" \"\n\t\t\tlinelen = 0\n\t\tif linelen == 0 and w.strip() == \"\": continue\n\t\tret += w\n\t\tlinelen += len(w)\n\treturn ret\n\nif __name__ == '__main__':\n\tgenerate_documentation()\n\n\n\nFile: tools/parse-nginx-log-bootstrap-accesses.py\n\n#!/usr/bin/python3\n#\n# This is a tool Josh uses on his box serving mailinabox.email to parse the nginx\n# access log to see how many people are installing Mail-in-a-Box each day, by\n# looking at accesses to the bootstrap.sh script (which is currently at the URL\n# .../setup.sh).\n\nimport re, glob, gzip, os.path, json\nimport dateutil.parser\n\noutfn = \"/home/user-data/www/mailinabox.email/install-stats.json\"\n\n# Make a unique list of (date, ip address) pairs so we don't double-count\n# accesses that are for the same install.\naccesses = set()\n\n# Scan the current and rotated access logs.\nfor fn in glob.glob(\"/var/log/nginx/access.log*\"):\n\t# Gunzip if necessary.\n\t# Loop through the lines in the access log.\n\twith (gzip.open if fn.endswith(\".gz\") else open)(fn, \"rb\") as f:\n\t\tfor line in f:\n\t\t\t# Find lines that are GETs on the bootstrap script by either curl or wget.\n\t\t\t# (Note that we purposely skip ...?ping=1 requests which is the admin panel querying us for updates.)\n\t\t\t# (Also, the URL changed in January 2016, but we'll accept both.)\n\t\t\tm = re.match(rb\"(?P\\S+) - - \\[(?P.*?)\\] \\\"GET /(bootstrap.sh|setup.sh) HTTP/.*\\\" 200 \\d+ .* \\\"(?:curl|wget)\", line, re.I)\n\t\t\tif m:\n\t\t\t\tdate, time = m.group(\"date\").decode(\"ascii\").split(\":\", 1)\n\t\t\t\tdate = dateutil.parser.parse(date).date().isoformat()\n\t\t\t\tip = m.group(\"ip\").decode(\"ascii\")\n\t\t\t\taccesses.add( (date, ip) )\n\n# Aggregate by date.\nby_date = { }\nfor date, ip in accesses:\n\tby_date[date] = by_date.get(date, 0) + 1\n\n# Since logs are rotated, store the statistics permanently in a JSON file.\n# Load in the stats from an existing file.\nif os.path.exists(outfn):\n\twith open(outfn, encoding=\"utf-8\") as f:\n\t\texisting_data = json.load(f)\n\tfor date, count in existing_data:\n\t\tif date not in by_date:\n\t\t\tby_date[date] = count\n\n# Turn into a list rather than a dict structure to make it ordered.\nby_date = sorted(by_date.items())\n\n# Pop the last one because today's stats are incomplete.\nby_date.pop(-1)\n\n# Write out.\nwith open(outfn, \"w\", encoding=\"utf-8\") as f:\n\tjson.dump(by_date, f, sort_keys=True, indent=True)\n\n\n\nFile: tools/editconf.py\n\n#!/usr/bin/python3\n#\n# This is a helper tool for editing configuration files during the setup\n# process. The tool is given new values for settings as command-line\n# arguments. It comments-out existing setting values in the configuration\n# file and adds new values either after their former location or at the\n# end.\n#\n# The configuration file has settings that look like:\n#\n# NAME=VALUE\n#\n# If the -s option is given, then space becomes the delimiter, i.e.:\n#\n# NAME VALUE\n#\n# If the -e option is given and VALUE is empty, the setting is removed\n# from the configuration file if it is set (i.e. existing occurrences\n# are commented out and no new setting is added).\n#\n# If the -c option is given, then the supplied character becomes the comment character\n#\n# If the -w option is given, then setting lines continue onto following\n# lines while the lines start with whitespace, e.g.:\n#\n# NAME VAL\n# UE\n\nimport sys, re\n\n# sanity check\nif len(sys.argv) < 3:\n\tprint(\"usage: python3 editconf.py /etc/file.conf [-e] [-s] [-w] [-c ] [-t] NAME=VAL [NAME=VAL ...]\")\n\tsys.exit(1)\n\n# parse command line arguments\nfilename = sys.argv[1]\nsettings = sys.argv[2:]\n\ndelimiter = \"=\"\ndelimiter_re = r\"\\s*=\\s*\"\nerase_setting = False\ncomment_char = \"#\"\nfolded_lines = False\ntesting = False\nwhile settings[0][0] == \"-\" and settings[0] != \"--\":\n\topt = settings.pop(0)\n\tif opt == \"-s\":\n\t\t# Space is the delimiter\n\t\tdelimiter = \" \"\n\t\tdelimiter_re = r\"\\s+\"\n\telif opt == \"-e\":\n\t\t# Erase settings that have empty values.\n\t\terase_setting = True\n\telif opt == \"-w\":\n\t\t# Line folding is possible in this file.\n\t\tfolded_lines = True\n\telif opt == \"-c\":\n\t\t# Specifies a different comment character.\n\t\tcomment_char = settings.pop(0)\n\telif opt == \"-t\":\n\t\ttesting = True\n\telse:\n\t\tprint(\"Invalid option.\")\n\t\tsys.exit(1)\n\n# sanity check command line\nfor setting in settings:\n\ttry:\n\t\tname, value = setting.split(\"=\", 1)\n\texcept:\n\t\timport subprocess\n\t\tprint(\"Invalid command line: \", subprocess.list2cmdline(sys.argv))\n\n# create the new config file in memory\n\nfound = set()\nbuf = \"\"\nwith open(filename, encoding=\"utf-8\") as f:\n input_lines = list(f)\n\nwhile len(input_lines) > 0:\n\tline = input_lines.pop(0)\n\n\t# If this configuration file uses folded lines, append any folded lines\n\t# into our input buffer.\n\tif folded_lines and line[0] not in {comment_char, \" \", \"\"}:\n\t\twhile len(input_lines) > 0 and input_lines[0][0] in \" \\t\":\n\t\t\tline += input_lines.pop(0)\n\n\t# See if this line is for any settings passed on the command line.\n\tfor i in range(len(settings)):\n\t\t# Check if this line contain this setting from the command-line arguments.\n\t\tname, val = settings[i].split(\"=\", 1)\n\t\tm = re.match(\n\t\t\t r\"(\\s*)\"\n\t\t\t \"(\" + re.escape(comment_char) + r\"\\s*)?\"\n\t\t\t + re.escape(name) + delimiter_re + r\"(.*?)\\s*$\",\n\t\t\t line, re.S)\n\t\tif not m: continue\n\t\tindent, is_comment, existing_val = m.groups()\n\n\t\t# If this is already the setting, keep it in the file, except:\n\t\t# * If we've already seen it before, then remove this duplicate line.\n\t\t# * If val is empty and erase_setting is on, then comment it out.\n\t\tif is_comment is None and existing_val == val and not (not val and erase_setting):\n\t\t\t# It may be that we've already inserted this setting higher\n\t\t\t# in the file so check for that first.\n\t\t\tif i in found: break\n\t\t\tbuf += line\n\t\t\tfound.add(i)\n\t\t\tbreak\n\n\t\t# comment-out the existing line (also comment any folded lines)\n\t\tif is_comment is None:\n\t\t\tbuf += comment_char + line.rstrip().replace(\"\\n\", \"\\n\" + comment_char) + \"\\n\"\n\t\telse:\n\t\t\t# the line is already commented, pass it through\n\t\t\tbuf += line\n\n\t\t# if this option already is set don't add the setting again,\n\t\t# or if we're clearing the setting with -e, don't add it\n\t\tif (i in found) or (not val and erase_setting):\n\t\t\tbreak\n\n\t\t# add the new setting\n\t\tbuf += indent + name + delimiter + val + \"\\n\"\n\n\t\t# note that we've applied this option\n\t\tfound.add(i)\n\n\t\tbreak\n\telse:\n\t\t# If did not match any setting names, pass this line through.\n\t\tbuf += line\n\n# Put any settings we didn't see at the end of the file,\n# except settings being cleared.\nfor i in range(len(settings)):\n\tif i not in found:\n\t\tname, val = settings[i].split(\"=\", 1)\n\t\tif not (not val and erase_setting):\n\t\t\tbuf += name + delimiter + val + \"\\n\"\n\nif not testing:\n\t# Write out the new file.\n\twith open(filename, \"w\", encoding=\"utf-8\") as f:\n\t\tf.write(buf)\nelse:\n\t# Just print the new file to stdout.\n\tprint(buf)\n\n\n\nFile: setup/migrate.py\n\n#!/usr/bin/python3\n\n# Migrates any file structures, database schemas, etc. between versions of Mail-in-a-Box.\n\n# We have to be careful here that any dependencies are already installed in the previous\n# version since this script runs before all other aspects of the setup script.\n\nimport sys, os, os.path, glob, re, shutil\n\nsys.path.insert(0, 'management')\nfrom utils import load_environment, save_environment, shell\nimport contextlib\n\ndef migration_1(env):\n\t# Re-arrange where we store SSL certificates. There was a typo also.\n\n\tdef move_file(fn, domain_name_escaped, filename):\n\t\t# Moves an SSL-related file into the right place.\n\t\tfn1 = os.path.join( env[\"STORAGE_ROOT\"], 'ssl', domain_name_escaped, file_type)\n\t\tos.makedirs(os.path.dirname(fn1), exist_ok=True)\n\t\tshutil.move(fn, fn1)\n\n\t# Migrate the 'domains' directory.\n\tfor sslfn in glob.glob(os.path.join( env[\"STORAGE_ROOT\"], 'ssl/domains/*' )):\n\t\tfn = os.path.basename(sslfn)\n\t\tm = re.match(\"(.*)_(certifiate.pem|cert_sign_req.csr|private_key.pem)$\", fn)\n\t\tif m:\n\t\t\t# get the new name for the file\n\t\t\tdomain_name, file_type = m.groups()\n\t\t\tif file_type == \"certifiate.pem\": file_type = \"ssl_certificate.pem\" # typo\n\t\t\tif file_type == \"cert_sign_req.csr\": file_type = \"certificate_signing_request.csr\" # nicer\n\t\t\tmove_file(sslfn, domain_name, file_type)\n\n\t# Move the old domains directory if it is now empty.\n\twith contextlib.suppress(Exception):\n\t\tos.rmdir(os.path.join( env[\"STORAGE_ROOT\"], 'ssl/domains'))\n\ndef migration_2(env):\n\t# Delete the .dovecot_sieve script everywhere. This was formerly a copy of our spam -> Spam\n\t# script. We now install it as a global script, and we use managesieve, so the old file is\n\t# irrelevant. Also delete the compiled binary form.\n\tfor fn in glob.glob(os.path.join(env[\"STORAGE_ROOT\"], 'mail/mailboxes/*/*/.dovecot.sieve')):\n\t\tos.unlink(fn)\n\tfor fn in glob.glob(os.path.join(env[\"STORAGE_ROOT\"], 'mail/mailboxes/*/*/.dovecot.svbin')):\n\t\tos.unlink(fn)\n\ndef migration_3(env):\n\t# Move the migration ID from /etc/mailinabox.conf to $STORAGE_ROOT/mailinabox.version\n\t# so that the ID stays with the data files that it describes the format of. The writing\n\t# of the file will be handled by the main function.\n\tpass\n\ndef migration_4(env):\n\t# Add a new column to the mail users table where we can store administrative privileges.\n\tdb = os.path.join(env[\"STORAGE_ROOT\"], 'mail/users.sqlite')\n\tshell(\"check_call\", [\"sqlite3\", db, \"ALTER TABLE users ADD privileges TEXT NOT NULL DEFAULT ''\"])\n\ndef migration_5(env):\n\t# The secret key for encrypting backups was world readable. Fix here.\n\tos.chmod(os.path.join(env[\"STORAGE_ROOT\"], 'backup/secret_key.txt'), 0o600)\n\ndef migration_6(env):\n\t# We now will generate multiple DNSSEC keys for different algorithms, since TLDs may\n\t# not support them all. .email only supports RSA/SHA-256. Rename the keys.conf file\n\t# to be algorithm-specific.\n\tbasepath = os.path.join(env[\"STORAGE_ROOT\"], 'dns/dnssec')\n\tshutil.move(os.path.join(basepath, 'keys.conf'), os.path.join(basepath, 'RSASHA1-NSEC3-SHA1.conf'))\n\ndef migration_7(env):\n\t# I previously wanted domain names to be stored in Unicode in the database. Now I want them\n\t# to be in IDNA. Affects aliases only.\n\timport sqlite3\n\tconn = sqlite3.connect(os.path.join(env[\"STORAGE_ROOT\"], \"mail/users.sqlite\"))\n\n\t# Get existing alias source addresses.\n\tc = conn.cursor()\n\tc.execute('SELECT source FROM aliases')\n\taliases = [ row[0] for row in c.fetchall() ]\n\n\t# Update to IDNA-encoded domains.\n\tfor email in aliases:\n\t\ttry:\n\t\t\tlocalpart, domainpart = email.split(\"@\")\n\t\t\tdomainpart = domainpart.encode(\"idna\").decode(\"ascii\")\n\t\t\tnewemail = localpart + \"@\" + domainpart\n\t\t\tif newemail != email:\n\t\t\t\tc = conn.cursor()\n\t\t\t\tc.execute(\"UPDATE aliases SET source=? WHERE source=?\", (newemail, email))\n\t\t\t\tif c.rowcount != 1: raise ValueError(\"Alias not found.\")\n\t\t\t\tprint(\"Updated alias\", email, \"to\", newemail)\n\t\texcept Exception as e:\n\t\t\tprint(\"Error updating IDNA alias\", email, e)\n\n\t# Save.\n\tconn.commit()\n\ndef migration_8(env):\n\t# Delete DKIM keys. We had generated 1024-bit DKIM keys.\n\t# By deleting the key file we'll automatically generate\n\t# a new key, which will be 2048 bits.\n\tos.unlink(os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private'))\n\ndef migration_9(env):\n\t# Add a column to the aliases table to store permitted_senders,\n\t# which is a list of user account email addresses that are\n\t# permitted to send mail using this alias instead of their own\n\t# address. This was motivated by the addition of #427 (\"Reject\n\t# outgoing mail if FROM does not match Login\") - which introduced\n\t# the notion of outbound permitted-senders.\n\tdb = os.path.join(env[\"STORAGE_ROOT\"], 'mail/users.sqlite')\n\tshell(\"check_call\", [\"sqlite3\", db, \"ALTER TABLE aliases ADD permitted_senders TEXT\"])\n\ndef migration_10(env):\n\t# Clean up the SSL certificates directory.\n\n\t# Move the primary certificate to a new name and then\n\t# symlink it to the system certificate path.\n\timport datetime\n\tsystem_certificate = os.path.join(env[\"STORAGE_ROOT\"], 'ssl/ssl_certificate.pem')\n\tif not os.path.islink(system_certificate): # not already a symlink\n\t\tnew_path = os.path.join(env[\"STORAGE_ROOT\"], 'ssl', env['PRIMARY_HOSTNAME'] + \"-\" + datetime.datetime.now().date().isoformat().replace(\"-\", \"\") + \".pem\")\n\t\tprint(\"Renamed\", system_certificate, \"to\", new_path, \"and created a symlink for the original location.\")\n\t\tshutil.move(system_certificate, new_path)\n\t\tos.symlink(new_path, system_certificate)\n\n\t# Flatten the directory structure. For any directory\n\t# that contains a single file named ssl_certificate.pem,\n\t# move the file out and name it the same as the directory,\n\t# and remove the directory.\n\tfor sslcert in glob.glob(os.path.join( env[\"STORAGE_ROOT\"], 'ssl/*/ssl_certificate.pem' )):\n\t\td = os.path.dirname(sslcert)\n\t\tif len(os.listdir(d)) == 1:\n\t\t\t# This certificate is the only file in that directory.\n\t\t\tnewname = os.path.join(env[\"STORAGE_ROOT\"], 'ssl', os.path.basename(d) + '.pem')\n\t\t\tif not os.path.exists(newname):\n\t\t\t\tshutil.move(sslcert, newname)\n\t\t\t\tos.rmdir(d)\n\ndef migration_11(env):\n\t# Archive the old Let's Encrypt account directory managed by free_tls_certificates\n\t# because we'll use that path now for the directory managed by certbot.\n\ttry:\n\t\told_path = os.path.join(env[\"STORAGE_ROOT\"], 'ssl', 'lets_encrypt')\n\t\tnew_path = os.path.join(env[\"STORAGE_ROOT\"], 'ssl', 'lets_encrypt-old')\n\t\tshutil.move(old_path, new_path)\n\texcept:\n\t\t# meh\n\t\tpass\n\ndef migration_12(env):\n\t# Upgrading to Carddav Roundcube plugin to version 3+, it requires the carddav_*\n # tables to be dropped.\n # Checking that the roundcube database already exists.\n if os.path.exists(os.path.join(env[\"STORAGE_ROOT\"], \"mail/roundcube/roundcube.sqlite\")):\n import sqlite3\n conn = sqlite3.connect(os.path.join(env[\"STORAGE_ROOT\"], \"mail/roundcube/roundcube.sqlite\"))\n c = conn.cursor()\n # Get a list of all the tables that begin with 'carddav_'\n c.execute(\"SELECT name FROM sqlite_master WHERE type = ? AND name LIKE ?\", ('table', 'carddav_%'))\n carddav_tables = c.fetchall()\n # If there were tables that begin with 'carddav_', drop them\n if carddav_tables:\n for table in carddav_tables:\n try:\n table = table[0]\n c = conn.cursor()\n dropcmd = \"DROP TABLE %s\" % table\n c.execute(dropcmd)\n except:\n print(\"Failed to drop table\", table)\n # Save.\n conn.commit()\n conn.close()\n\n # Delete all sessions, requiring users to login again to recreate carddav_*\n # databases\n conn = sqlite3.connect(os.path.join(env[\"STORAGE_ROOT\"], \"mail/roundcube/roundcube.sqlite\"))\n c = conn.cursor()\n c.execute(\"delete from session;\")\n conn.commit()\n conn.close()\n\ndef migration_13(env):\n\t# Add the \"mfa\" table for configuring MFA for login to the control panel.\n\tdb = os.path.join(env[\"STORAGE_ROOT\"], 'mail/users.sqlite')\n\tshell(\"check_call\", [\"sqlite3\", db, \"CREATE TABLE mfa (id INTEGER PRIMARY KEY AUTOINCREMENT, user_id INTEGER NOT NULL, type TEXT NOT NULL, secret TEXT NOT NULL, mru_token TEXT, label TEXT, FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE);\"])\n\ndef migration_14(env):\n\t# Add the \"auto_aliases\" table.\n\tdb = os.path.join(env[\"STORAGE_ROOT\"], 'mail/users.sqlite')\n\tshell(\"check_call\", [\"sqlite3\", db, \"CREATE TABLE auto_aliases (id INTEGER PRIMARY KEY AUTOINCREMENT, source TEXT NOT NULL UNIQUE, destination TEXT NOT NULL, permitted_senders TEXT);\"])\n\n###########################################################\n\ndef get_current_migration():\n\tver = 0\n\twhile True:\n\t\tnext_ver = (ver + 1)\n\t\tmigration_func = globals().get(\"migration_%d\" % next_ver)\n\t\tif not migration_func:\n\t\t\treturn ver\n\t\tver = next_ver\n\ndef run_migrations():\n\tif not os.access(\"/etc/mailinabox.conf\", os.W_OK, effective_ids=True):\n\t\tprint(\"This script must be run as root.\", file=sys.stderr)\n\t\tsys.exit(1)\n\n\tenv = load_environment()\n\n\tmigration_id_file = os.path.join(env['STORAGE_ROOT'], 'mailinabox.version')\n\tmigration_id = None\n\tif os.path.exists(migration_id_file):\n\t\twith open(migration_id_file, encoding='utf-8') as f:\n\t\t\tmigration_id = f.read().strip()\n\n\tif migration_id is None:\n\t\t# Load the legacy location of the migration ID. We'll drop support\n\t\t# for this eventually.\n\t\tmigration_id = env.get(\"MIGRATIONID\")\n\n\tif migration_id is None:\n\t\tprint()\n\t\tprint(f\"{migration_id_file} file doesn't exists. Skipping migration...\")\n\t\treturn\n\n\tourver = int(migration_id)\n\n\twhile True:\n\t\tnext_ver = (ourver + 1)\n\t\tmigration_func = globals().get(\"migration_%d\" % next_ver)\n\n\t\tif not migration_func:\n\t\t\t# No more migrations to run.\n\t\t\tbreak\n\n\t\tprint()\n\t\tprint(\"Running migration to Mail-in-a-Box #%d...\" % next_ver)\n\n\t\ttry:\n\t\t\tmigration_func(env)\n\t\texcept Exception as e:\n\t\t\tprint()\n\t\t\tprint(\"Error running the migration script:\")\n\t\t\tprint()\n\t\t\tprint(e)\n\t\t\tprint()\n\t\t\tprint(\"Your system may be in an inconsistent state now. We're terribly sorry. A re-install from a backup might be the best way to continue.\")\n\t\t\tsys.exit(1)\n\n\t\tourver = next_ver\n\n\t\t# Write out our current version now. Do this sooner rather than later\n\t\t# in case of any problems.\n\t\twith open(migration_id_file, \"w\", encoding='utf-8') as f:\n\t\t\tf.write(str(ourver) + \"\\n\")\n\n\t\t# Delete the legacy location of this field.\n\t\tif \"MIGRATIONID\" in env:\n\t\t\tdel env[\"MIGRATIONID\"]\n\t\t\tsave_environment(env)\n\n\t\t# iterate and try next version...\n\nif __name__ == \"__main__\":\n\tif sys.argv[-1] == \"--current\":\n\t\t# Return the number of the highest migration.\n\t\tprint(str(get_current_migration()))\n\telif sys.argv[-1] == \"--migrate\":\n\t\t# Perform migrations.\n\t\trun_migrations()\n\n\n\n\nFile: management/mfa.py\n\nimport base64\nimport hmac\nimport io\nimport os\nimport pyotp\nimport qrcode\n\nfrom mailconfig import open_database\n\ndef get_user_id(email, c):\n\tc.execute('SELECT id FROM users WHERE email=?', (email,))\n\tr = c.fetchone()\n\tif not r: raise ValueError(\"User does not exist.\")\n\treturn r[0]\n\ndef get_mfa_state(email, env):\n\tc = open_database(env)\n\tc.execute('SELECT id, type, secret, mru_token, label FROM mfa WHERE user_id=?', (get_user_id(email, c),))\n\treturn [\n\t\t{ \"id\": r[0], \"type\": r[1], \"secret\": r[2], \"mru_token\": r[3], \"label\": r[4] }\n\t\tfor r in c.fetchall()\n\t]\n\ndef get_public_mfa_state(email, env):\n\tmfa_state = get_mfa_state(email, env)\n\treturn [\n\t\t{ \"id\": s[\"id\"], \"type\": s[\"type\"], \"label\": s[\"label\"] }\n\t\tfor s in mfa_state\n\t]\n\ndef get_hash_mfa_state(email, env):\n\tmfa_state = get_mfa_state(email, env)\n\treturn [\n\t\t{ \"id\": s[\"id\"], \"type\": s[\"type\"], \"secret\": s[\"secret\"] }\n\t\tfor s in mfa_state\n\t]\n\ndef enable_mfa(email, type, secret, token, label, env):\n\tif type == \"totp\":\n\t\tvalidate_totp_secret(secret)\n\t\t# Sanity check with the provide current token.\n\t\ttotp = pyotp.TOTP(secret)\n\t\tif not totp.verify(token, valid_window=1):\n\t\t\tmsg = \"Invalid token.\"\n\t\t\traise ValueError(msg)\n\telse:\n\t\tmsg = \"Invalid MFA type.\"\n\t\traise ValueError(msg)\n\n\tconn, c = open_database(env, with_connection=True)\n\tc.execute('INSERT INTO mfa (user_id, type, secret, label) VALUES (?, ?, ?, ?)', (get_user_id(email, c), type, secret, label))\n\tconn.commit()\n\ndef set_mru_token(email, mfa_id, token, env):\n\tconn, c = open_database(env, with_connection=True)\n\tc.execute('UPDATE mfa SET mru_token=? WHERE user_id=? AND id=?', (token, get_user_id(email, c), mfa_id))\n\tconn.commit()\n\ndef disable_mfa(email, mfa_id, env):\n\tconn, c = open_database(env, with_connection=True)\n\tif mfa_id is None:\n\t\t# Disable all MFA for a user.\n\t\tc.execute('DELETE FROM mfa WHERE user_id=?', (get_user_id(email, c),))\n\telse:\n\t\t# Disable a particular MFA mode for a user.\n\t\tc.execute('DELETE FROM mfa WHERE user_id=? AND id=?', (get_user_id(email, c), mfa_id))\n\tconn.commit()\n\treturn c.rowcount > 0\n\ndef validate_totp_secret(secret):\n\tif not isinstance(secret, str) or secret.strip() == \"\":\n\t\tmsg = \"No secret provided.\"\n\t\traise ValueError(msg)\n\tif len(secret) != 32:\n\t\tmsg = \"Secret should be a 32 characters base32 string\"\n\t\traise ValueError(msg)\n\ndef provision_totp(email, env):\n\t# Make a new secret.\n\tsecret = base64.b32encode(os.urandom(20)).decode('utf-8')\n\tvalidate_totp_secret(secret) # sanity check\n\n\t# Make a URI that we encode within a QR code.\n\turi = pyotp.TOTP(secret).provisioning_uri(\n\t\tname=email,\n\t\tissuer_name=env[\"PRIMARY_HOSTNAME\"] + \" Mail-in-a-Box Control Panel\"\n\t)\n\n\t# Generate a QR code as a base64-encode PNG image.\n\tqr = qrcode.make(uri)\n\tbyte_arr = io.BytesIO()\n\tqr.save(byte_arr, format='PNG')\n\tpng_b64 = base64.b64encode(byte_arr.getvalue()).decode('utf-8')\n\n\treturn {\n\t\t\"type\": \"totp\",\n\t\t\"secret\": secret,\n\t\t\"qr_code_base64\": png_b64\n\t}\n\ndef validate_auth_mfa(email, request, env):\n\t# Validates that a login request satisfies any MFA modes\n\t# that have been enabled for the user's account. Returns\n\t# a tuple (status, [hints]). status is True for a successful\n\t# MFA login, False for a missing token. If status is False,\n\t# hints is an array of codes that indicate what the user\n\t# can try. Possible codes are:\n\t# \"missing-totp-token\"\n\t# \"invalid-totp-token\"\n\n\tmfa_state = get_mfa_state(email, env)\n\n\t# If no MFA modes are added, return True.\n\tif len(mfa_state) == 0:\n\t\treturn (True, [])\n\n\t# Try the enabled MFA modes.\n\thints = set()\n\tfor mfa_mode in mfa_state:\n\t\tif mfa_mode[\"type\"] == \"totp\":\n\t\t\t# Check that a token is present in the X-Auth-Token header.\n\t\t\t# If not, give a hint that one can be supplied.\n\t\t\ttoken = request.headers.get('x-auth-token')\n\t\t\tif not token:\n\t\t\t\thints.add(\"missing-totp-token\")\n\t\t\t\tcontinue\n\n\t\t\t# Check for a replay attack.\n\t\t\tif hmac.compare_digest(token, mfa_mode['mru_token'] or \"\"):\n\t\t\t\t# If the token fails, skip this MFA mode.\n\t\t\t\thints.add(\"invalid-totp-token\")\n\t\t\t\tcontinue\n\n\t\t\t# Check the token.\n\t\t\ttotp = pyotp.TOTP(mfa_mode[\"secret\"])\n\t\t\tif not totp.verify(token, valid_window=1):\n\t\t\t\thints.add(\"invalid-totp-token\")\n\t\t\t\tcontinue\n\n\t\t\t# On success, record the token to prevent a replay attack.\n\t\t\tset_mru_token(email, mfa_mode['id'], token, env)\n\t\t\treturn (True, [])\n\n\t# On a failed login, indicate failure and any hints for what the user can do instead.\n\treturn (False, list(hints))\n\n\n\nFile: management/auth.py\n\nimport base64, hmac, json, secrets\nfrom datetime import timedelta\n\nfrom expiringdict import ExpiringDict\n\nimport utils\nfrom mailconfig import get_mail_password, get_mail_user_privileges\nfrom mfa import get_hash_mfa_state, validate_auth_mfa\n\nDEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key'\nDEFAULT_AUTH_REALM = 'Mail-in-a-Box Management Server'\n\nclass AuthService:\n\tdef __init__(self):\n\t\tself.auth_realm = DEFAULT_AUTH_REALM\n\t\tself.key_path = DEFAULT_KEY_PATH\n\t\tself.max_session_duration = timedelta(days=2)\n\n\t\tself.init_system_api_key()\n\t\tself.sessions = ExpiringDict(max_len=64, max_age_seconds=self.max_session_duration.total_seconds())\n\n\tdef init_system_api_key(self):\n\t\t\"\"\"Write an API key to a local file so local processes can use the API\"\"\"\n\n\t\twith open(self.key_path, encoding='utf-8') as file:\n\t\t\tself.key = file.read()\n\n\tdef authenticate(self, request, env, login_only=False, logout=False):\n\t\t\"\"\"Test if the HTTP Authorization header's username matches the system key, a session key,\n\t\tor if the username/password passed in the header matches a local user.\n\t\tReturns a tuple of the user's email address and list of user privileges (e.g.\n\t\t('my@email', []) or ('my@email', ['admin']); raises a ValueError on login failure.\n\t\tIf the user used the system API key, the user's email is returned as None since\n\t\tthis key is not associated with a user.\"\"\"\n\n\t\tdef parse_http_authorization_basic(header):\n\t\t\tdef decode(s):\n\t\t\t\treturn base64.b64decode(s.encode('ascii')).decode('ascii')\n\t\t\tif \" \" not in header:\n\t\t\t\treturn None, None\n\t\t\tscheme, credentials = header.split(maxsplit=1)\n\t\t\tif scheme != 'Basic':\n\t\t\t\treturn None, None\n\t\t\tcredentials = decode(credentials)\n\t\t\tif \":\" not in credentials:\n\t\t\t\treturn None, None\n\t\t\tusername, password = credentials.split(':', maxsplit=1)\n\t\t\treturn username, password\n\n\t\tusername, password = parse_http_authorization_basic(request.headers.get('Authorization', ''))\n\t\tif username in {None, \"\"}:\n\t\t\tmsg = \"Authorization header invalid.\"\n\t\t\traise ValueError(msg)\n\n\t\tif username.strip() == \"\" and password.strip() == \"\":\n\t\t\tmsg = \"No email address, password, session key, or API key provided.\"\n\t\t\traise ValueError(msg)\n\n\t\t# If user passed the system API key, grant administrative privs. This key\n\t\t# is not associated with a user.\n\t\tif username == self.key and not login_only:\n\t\t\treturn (None, [\"admin\"])\n\n\t\t# If the password corresponds with a session token for the user, grant access for that user.\n\t\tif self.get_session(username, password, \"login\", env) and not login_only:\n\t\t\tsessionid = password\n\t\t\tsession = self.sessions[sessionid]\n\t\t\tif logout:\n\t\t\t\t# Clear the session.\n\t\t\t\tdel self.sessions[sessionid]\n\t\t\telse:\n\t\t\t\t# Re-up the session so that it does not expire.\n\t\t\t\tself.sessions[sessionid] = session\n\n\t\t# If no password was given, but a username was given, we're missing some information.\n\t\telif password.strip() == \"\":\n\t\t\tmsg = \"Enter a password.\"\n\t\t\traise ValueError(msg)\n\n\t\telse:\n\t\t\t# The user is trying to log in with a username and a password\n\t\t\t# (and possibly a MFA token). On failure, an exception is raised.\n\t\t\tself.check_user_auth(username, password, request, env)\n\n\t\t# Get privileges for authorization. This call should never fail because by this\n\t\t# point we know the email address is a valid user --- unless the user has been\n\t\t# deleted after the session was granted. On error the call will return a tuple\n\t\t# of an error message and an HTTP status code.\n\t\tprivs = get_mail_user_privileges(username, env)\n\t\tif isinstance(privs, tuple): raise ValueError(privs[0])\n\n\t\t# Return the authorization information.\n\t\treturn (username, privs)\n\n\tdef check_user_auth(self, email, pw, request, env):\n\t\t# Validate a user's login email address and password. If MFA is enabled,\n\t\t# check the MFA token in the X-Auth-Token header.\n\t\t#\n\t\t# On login failure, raises a ValueError with a login error message. On\n\t\t# success, nothing is returned.\n\n\t\t# Authenticate.\n\t\ttry:\n\t\t\t# Get the hashed password of the user. Raise a ValueError if the\n\t\t\t# email address does not correspond to a user. But wrap it in the\n\t\t\t# same exception as if a password fails so we don't easily reveal\n\t\t\t# if an email address is valid.\n\t\t\tpw_hash = get_mail_password(email, env)\n\n\t\t\t# Use 'doveadm pw' to check credentials. doveadm will return\n\t\t\t# a non-zero exit status if the credentials are no good,\n\t\t\t# and check_call will raise an exception in that case.\n\t\t\tutils.shell('check_call', [\n\t\t\t\t\"/usr/bin/doveadm\", \"pw\",\n\t\t\t\t\"-p\", pw,\n\t\t\t\t\"-t\", pw_hash,\n\t\t\t\t])\n\t\texcept:\n\t\t\t# Login failed.\n\t\t\tmsg = \"Incorrect email address or password.\"\n\t\t\traise ValueError(msg)\n\n\t\t# If MFA is enabled, check that MFA passes.\n\t\tstatus, hints = validate_auth_mfa(email, request, env)\n\t\tif not status:\n\t\t\t# Login valid. Hints may have more info.\n\t\t\traise ValueError(\",\".join(hints))\n\n\tdef create_user_password_state_token(self, email, env):\n\t\t# Create a token that changes if the user's password or MFA options change\n\t\t# so that sessions become invalid if any of that information changes.\n\t\tmsg = get_mail_password(email, env).encode(\"utf8\")\n\n\t\t# Add to the message the current MFA state, which is a list of MFA information.\n\t\t# Turn it into a string stably.\n\t\tmsg += b\" \" + json.dumps(get_hash_mfa_state(email, env), sort_keys=True).encode(\"utf8\")\n\n\t\t# Make a HMAC using the system API key as a hash key.\n\t\thash_key = self.key.encode('ascii')\n\t\treturn hmac.new(hash_key, msg, digestmod=\"sha256\").hexdigest()\n\n\tdef create_session_key(self, username, env, type=None):\n\t\t# Create a new session.\n\t\ttoken = secrets.token_hex(32)\n\t\tself.sessions[token] = {\n\t\t\t\"email\": username,\n\t\t\t\"password_token\": self.create_user_password_state_token(username, env),\n\t\t\t\"type\": type,\n\t\t}\n\t\treturn token\n\n\tdef get_session(self, user_email, session_key, session_type, env):\n\t\tif session_key not in self.sessions: return None\n\t\tsession = self.sessions[session_key]\n\t\tif session_type == \"login\" and session[\"email\"] != user_email: return None\n\t\tif session[\"type\"] != session_type: return None\n\t\tif session[\"password_token\"] != self.create_user_password_state_token(session[\"email\"], env): return None\n\t\treturn session\n\n\n\nFile: management/ssl_certificates.py\n\n#!/usr/local/lib/mailinabox/env/bin/python\n# Utilities for installing and selecting SSL certificates.\n\nimport os, os.path, re, shutil, subprocess, tempfile\n\nfrom utils import shell, safe_domain_name, sort_domains\nimport functools\nimport operator\n\n# SELECTING SSL CERTIFICATES FOR USE IN WEB\n\ndef get_ssl_certificates(env):\n\t# Scan all of the installed SSL certificates and map every domain\n\t# that the certificates are good for to the best certificate for\n\t# the domain.\n\n\tfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey\n\tfrom cryptography.x509 import Certificate\n\n\t# The certificates are all stored here:\n\tssl_root = os.path.join(env[\"STORAGE_ROOT\"], 'ssl')\n\n\t# List all of the files in the SSL directory and one level deep.\n\tdef get_file_list():\n\t\tif not os.path.exists(ssl_root):\n\t\t\treturn\n\t\tfor fn in os.listdir(ssl_root):\n\t\t\tif fn == 'ssl_certificate.pem':\n\t\t\t\t# This is always a symbolic link\n\t\t\t\t# to the certificate to use for\n\t\t\t\t# PRIMARY_HOSTNAME. Don't let it\n\t\t\t\t# be eligible for use because we\n\t\t\t\t# could end up creating a symlink\n\t\t\t\t# to itself --- we want to find\n\t\t\t\t# the cert that it should be a\n\t\t\t\t# symlink to.\n\t\t\t\tcontinue\n\t\t\tfn = os.path.join(ssl_root, fn)\n\t\t\tif os.path.isfile(fn):\n\t\t\t\tyield fn\n\t\t\telif os.path.isdir(fn):\n\t\t\t\tfor fn1 in os.listdir(fn):\n\t\t\t\t\tfn1 = os.path.join(fn, fn1)\n\t\t\t\t\tif os.path.isfile(fn1):\n\t\t\t\t\t\tyield fn1\n\n\t# Remember stuff.\n\tprivate_keys = { }\n\tcertificates = [ ]\n\n\t# Scan each of the files to find private keys and certificates.\n\t# We must load all of the private keys first before processing\n\t# certificates so that we can check that we have a private key\n\t# available before using a certificate.\n\tfor fn in get_file_list():\n\t\ttry:\n\t\t\tpem = load_pem(load_cert_chain(fn)[0])\n\t\texcept ValueError:\n\t\t\t# Not a valid PEM format for a PEM type we care about.\n\t\t\tcontinue\n\n\t\t# Is it a private key?\n\t\tif isinstance(pem, RSAPrivateKey):\n\t\t\tprivate_keys[pem.public_key().public_numbers()] = { \"filename\": fn, \"key\": pem }\n\n\t\t# Is it a certificate?\n\t\tif isinstance(pem, Certificate):\n\t\t\tcertificates.append({ \"filename\": fn, \"cert\": pem })\n\n\t# Process the certificates.\n\tdomains = { }\n\tfor cert in certificates:\n\t\t# What domains is this certificate good for?\n\t\tcert_domains, primary_domain = get_certificate_domains(cert[\"cert\"])\n\t\tcert[\"primary_domain\"] = primary_domain\n\n\t\t# Is there a private key file for this certificate?\n\t\tprivate_key = private_keys.get(cert[\"cert\"].public_key().public_numbers())\n\t\tif not private_key:\n\t\t\tcontinue\n\t\tcert[\"private_key\"] = private_key\n\n\t\t# Add this cert to the list of certs usable for the domains.\n\t\tfor domain in cert_domains:\n\t\t\t# The primary hostname can only use a certificate mapped\n\t\t\t# to the system private key.\n\t\t\tif domain == env['PRIMARY_HOSTNAME'] and cert[\"private_key\"][\"filename\"] != os.path.join(env['STORAGE_ROOT'], 'ssl', 'ssl_private_key.pem'):\n\t\t\t\tcontinue\n\n\t\t\tdomains.setdefault(domain, []).append(cert)\n\n\t# Sort the certificates to prefer good ones.\n\timport datetime\n\tnow = datetime.datetime.utcnow()\n\tret = { }\n\tfor domain, cert_list in domains.items():\n\t\t#for c in cert_list: print(domain, c.not_valid_before, c.not_valid_after, \"(\"+str(now)+\")\", c.issuer, c.subject, c._filename)\n\t\tcert_list.sort(key = lambda cert : (\n\t\t\t# must be valid NOW\n\t\t\tcert[\"cert\"].not_valid_before <= now <= cert[\"cert\"].not_valid_after,\n\n\t\t\t# prefer one that is not self-signed\n\t\t\tcert[\"cert\"].issuer != cert[\"cert\"].subject,\n\n\t\t\t###########################################################\n\t\t\t# The above lines ensure that valid certificates are chosen\n\t\t\t# over invalid certificates. The lines below choose between\n\t\t\t# multiple valid certificates available for this domain.\n\t\t\t###########################################################\n\n\t\t\t# prefer one with the expiration furthest into the future so\n\t\t\t# that we can easily rotate to new certs as we get them\n\t\t\tcert[\"cert\"].not_valid_after,\n\n\t\t\t###########################################################\n\t\t\t# We always choose the certificate that is good for the\n\t\t\t# longest period of time. This is important for how we\n\t\t\t# provision certificates for Let's Encrypt. To ensure that\n\t\t\t# we don't re-provision every night, we have to ensure that\n\t\t\t# if we choose to provison a certificate that it will\n\t\t\t# *actually* be used so the provisioning logic knows it\n\t\t\t# doesn't still need to provision a certificate for the\n\t\t\t# domain.\n\t\t\t###########################################################\n\n\t\t\t# in case a certificate is installed in multiple paths,\n\t\t\t# prefer the... lexicographically last one?\n\t\t\tcert[\"filename\"],\n\n\t\t), reverse=True)\n\t\tcert = cert_list.pop(0)\n\t\tret[domain] = {\n\t\t\t\"private-key\": cert[\"private_key\"][\"filename\"],\n\t\t\t\"certificate\": cert[\"filename\"],\n\t\t\t\"primary-domain\": cert[\"primary_domain\"],\n\t\t\t\"certificate_object\": cert[\"cert\"],\n\t\t\t}\n\n\treturn ret\n\ndef get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=False, use_main_cert=True):\n\tif use_main_cert or not allow_missing_cert:\n\t\t# Get the system certificate info.\n\t\tssl_private_key = os.path.join(os.path.join(env[\"STORAGE_ROOT\"], 'ssl', 'ssl_private_key.pem'))\n\t\tssl_certificate = os.path.join(os.path.join(env[\"STORAGE_ROOT\"], 'ssl', 'ssl_certificate.pem'))\n\t\tsystem_certificate = {\n\t\t\t\"private-key\": ssl_private_key,\n\t\t\t\"certificate\": ssl_certificate,\n\t\t\t\"primary-domain\": env['PRIMARY_HOSTNAME'],\n\t\t\t\"certificate_object\": load_pem(load_cert_chain(ssl_certificate)[0]),\n\t\t}\n\n\tif use_main_cert and domain == env['PRIMARY_HOSTNAME']:\n\t\t# The primary domain must use the server certificate because\n\t\t# it is hard-coded in some service configuration files.\n\t\treturn system_certificate\n\n\twildcard_domain = re.sub(r\"^[^\\.]+\", \"*\", domain)\n\tif domain in ssl_certificates:\n\t\treturn ssl_certificates[domain]\n\telif wildcard_domain in ssl_certificates:\n\t\treturn ssl_certificates[wildcard_domain]\n\telif not allow_missing_cert:\n\t\t# No valid certificate is available for this domain! Return default files.\n\t\treturn system_certificate\n\telse:\n\t\t# No valid certificate is available for this domain.\n\t\treturn None\n\n\n# PROVISIONING CERTIFICATES FROM LETSENCRYPT\n\ndef get_certificates_to_provision(env, limit_domains=None, show_valid_certs=True):\n\t# Get a set of domain names that we can provision certificates for\n\t# using certbot. We start with domains that the box is serving web\n\t# for and subtract:\n\t# * domains not in limit_domains if limit_domains is not empty\n\t# * domains with custom \"A\" records, i.e. they are hosted elsewhere\n\t# * domains with actual \"A\" records that point elsewhere (misconfiguration)\n\t# * domains that already have certificates that will be valid for a while\n\n\tfrom web_update import get_web_domains\n\tfrom status_checks import query_dns, normalize_ip\n\n\texisting_certs = get_ssl_certificates(env)\n\n\tplausible_web_domains = get_web_domains(env, exclude_dns_elsewhere=False)\n\tactual_web_domains = get_web_domains(env)\n\n\tdomains_to_provision = set()\n\tdomains_cant_provision = { }\n\n\tfor domain in plausible_web_domains:\n\t\t# Skip domains that the user doesn't want to provision now.\n\t\tif limit_domains and domain not in limit_domains:\n\t\t\tcontinue\n\n\t\t# Check that there isn't an explicit A/AAAA record.\n\t\tif domain not in actual_web_domains:\n\t\t\tdomains_cant_provision[domain] = \"The domain has a custom DNS A/AAAA record that points the domain elsewhere, so there is no point to installing a TLS certificate here and we could not automatically provision one anyway because provisioning requires access to the website (which isn't here).\"\n\n\t\t# Check that the DNS resolves to here.\n\t\telse:\n\n\t\t\t# Does the domain resolve to this machine in public DNS? If not,\n\t\t\t# we can't do domain control validation. For IPv6 is configured,\n\t\t\t# make sure both IPv4 and IPv6 are correct because we don't know\n\t\t\t# how Let's Encrypt will connect.\n\t\t\tbad_dns = []\n\t\t\tfor rtype, value in [(\"A\", env[\"PUBLIC_IP\"]), (\"AAAA\", env.get(\"PUBLIC_IPV6\"))]:\n\t\t\t\tif not value: continue # IPv6 is not configured\n\t\t\t\tresponse = query_dns(domain, rtype)\n\t\t\t\tif response != normalize_ip(value):\n\t\t\t\t\tbad_dns.append(f\"{response} ({rtype})\")\n\n\t\t\tif bad_dns:\n\t\t\t\tdomains_cant_provision[domain] = \"The domain name does not resolve to this machine: \" \\\n\t\t\t\t\t+ (\", \".join(bad_dns)) \\\n\t\t\t\t\t+ \".\"\n\n\t\t\telse:\n\t\t\t\t# DNS is all good.\n\n\t\t\t\t# Check for a good existing cert.\n\t\t\t\texisting_cert = get_domain_ssl_files(domain, existing_certs, env, use_main_cert=False, allow_missing_cert=True)\n\t\t\t\tif existing_cert:\n\t\t\t\t\texisting_cert_check = check_certificate(domain, existing_cert['certificate'], existing_cert['private-key'],\n\t\t\t\t\t\twarn_if_expiring_soon=14)\n\t\t\t\t\tif existing_cert_check[0] == \"OK\":\n\t\t\t\t\t\tif show_valid_certs:\n\t\t\t\t\t\t\tdomains_cant_provision[domain] = \"The domain has a valid certificate already. ({} Certificate: {}, private key {})\".format(\n\t\t\t\t\t\t\t\texisting_cert_check[1],\n\t\t\t\t\t\t\t\texisting_cert['certificate'],\n\t\t\t\t\t\t\t\texisting_cert['private-key'])\n\t\t\t\t\t\tcontinue\n\n\t\t\t\tdomains_to_provision.add(domain)\n\n\treturn (domains_to_provision, domains_cant_provision)\n\ndef provision_certificates(env, limit_domains):\n\t# What domains should we provision certificates for? And what\n\t# errors prevent provisioning for other domains.\n\tdomains, domains_cant_provision = get_certificates_to_provision(env, limit_domains=limit_domains)\n\n\t# Build a list of what happened on each domain or domain-set.\n\tret = []\n\tfor domain, error in domains_cant_provision.items():\n\t\tret.append({\n\t\t\t\"domains\": [domain],\n\t\t\t\"log\": [error],\n\t\t\t\"result\": \"skipped\",\n\t\t})\n\n\t# Break into groups by DNS zone: Group every domain with its parent domain, if\n\t# its parent domain is in the list of domains to request a certificate for.\n\t# Start with the zones so that if the zone doesn't need a certificate itself,\n\t# its children will still be grouped together. Sort the provision domains to\n\t# put parents ahead of children.\n\t# Since Let's Encrypt requests are limited to 100 domains at a time,\n\t# we'll create a list of lists of domains where the inner lists have\n\t# at most 100 items. By sorting we also get the DNS zone domain as the first\n\t# entry in each list (unless we overflow beyond 100) which ends up as the\n\t# primary domain listed in each certificate.\n\tfrom dns_update import get_dns_zones\n\tcerts = { }\n\tfor zone, _zonefile in get_dns_zones(env):\n\t\tcerts[zone] = [[]]\n\tfor domain in sort_domains(domains, env):\n\t\t# Does the domain end with any domain we've seen so far.\n\t\tfor parent in certs:\n\t\t\tif domain.endswith(\".\" + parent):\n\t\t\t\t# Add this to the parent's list of domains.\n\t\t\t\t# Start a new group if the list already has\n\t\t\t\t# 100 items.\n\t\t\t\tif len(certs[parent][-1]) == 100:\n\t\t\t\t\tcerts[parent].append([])\n\t\t\t\tcerts[parent][-1].append(domain)\n\t\t\t\tbreak\n\t\telse:\n\t\t\t# This domain is not a child of any domain we've seen yet, so\n\t\t\t# start a new group. This shouldn't happen since every zone\n\t\t\t# was already added.\n\t\t\tcerts[domain] = [[domain]]\n\n\t# Flatten to a list of lists of domains (from a mapping). Remove empty\n\t# lists (zones with no domains that need certs).\n\tcerts = functools.reduce(operator.iadd, certs.values(), [])\n\tcerts = [_ for _ in certs if len(_) > 0]\n\n\t# Prepare to provision.\n\n\t# Where should we put our Let's Encrypt account info and state cache.\n\taccount_path = os.path.join(env['STORAGE_ROOT'], 'ssl/lets_encrypt')\n\tif not os.path.exists(account_path):\n\t\tos.mkdir(account_path)\n\n\t# Provision certificates.\n\tfor domain_list in certs:\n\t\tret.append({\n\t\t\t\"domains\": domain_list,\n\t\t\t\"log\": [],\n\t\t})\n\t\ttry:\n\t\t\t# Create a CSR file for our master private key so that certbot\n\t\t\t# uses our private key.\n\t\t\tkey_file = os.path.join(env['STORAGE_ROOT'], 'ssl', 'ssl_private_key.pem')\n\t\t\twith tempfile.NamedTemporaryFile() as csr_file:\n\t\t\t\t# We could use openssl, but certbot requires\n\t\t\t\t# that the CN domain and SAN domains match\n\t\t\t\t# the domain list passed to certbot, and adding\n\t\t\t\t# SAN domains openssl req is ridiculously complicated.\n\t\t\t\t# subprocess.check_output([\n\t\t\t\t# \t\"openssl\", \"req\", \"-new\",\n\t\t\t\t# \t\"-key\", key_file,\n\t\t\t\t# \t\"-out\", csr_file.name,\n\t\t\t\t# \t\"-subj\", \"/CN=\" + domain_list[0],\n\t\t\t\t# \t\"-sha256\" ])\n\t\t\t\tfrom cryptography import x509\n\t\t\t\tfrom cryptography.hazmat.backends import default_backend\n\t\t\t\tfrom cryptography.hazmat.primitives.serialization import Encoding\n\t\t\t\tfrom cryptography.hazmat.primitives import hashes\n\t\t\t\tfrom cryptography.x509.oid import NameOID\n\t\t\t\tbuilder = x509.CertificateSigningRequestBuilder()\n\t\t\t\tbuilder = builder.subject_name(x509.Name([ x509.NameAttribute(NameOID.COMMON_NAME, domain_list[0]) ]))\n\t\t\t\tbuilder = builder.add_extension(x509.BasicConstraints(ca=False, path_length=None), critical=True)\n\t\t\t\tbuilder = builder.add_extension(x509.SubjectAlternativeName(\n\t\t\t\t\t[x509.DNSName(d) for d in domain_list]\n\t\t\t\t), critical=False)\n\t\t\t\trequest = builder.sign(load_pem(load_cert_chain(key_file)[0]), hashes.SHA256(), default_backend())\n\t\t\t\twith open(csr_file.name, \"wb\") as f:\n\t\t\t\t\tf.write(request.public_bytes(Encoding.PEM))\n\n\t\t\t\t# Provision, writing to a temporary file.\n\t\t\t\twebroot = os.path.join(account_path, 'webroot')\n\t\t\t\tos.makedirs(webroot, exist_ok=True)\n\t\t\t\twith tempfile.TemporaryDirectory() as d:\n\t\t\t\t\tcert_file = os.path.join(d, 'cert_and_chain.pem')\n\t\t\t\t\tprint(\"Provisioning TLS certificates for \" + \", \".join(domain_list) + \".\")\n\t\t\t\t\tcertbotret = subprocess.check_output([\n\t\t\t\t\t\t\"certbot\",\n\t\t\t\t\t\t\"certonly\",\n\t\t\t\t\t\t#\"-v\", # just enough to see ACME errors\n\t\t\t\t\t\t\"--non-interactive\", # will fail if user hasn't registered during Mail-in-a-Box setup\n\n\t\t\t\t\t\t\"-d\", \",\".join(domain_list), # first will be main domain\n\n\t\t\t\t\t\t\"--csr\", csr_file.name, # use our private key; unfortunately this doesn't work with auto-renew so we need to save cert manually\n\t\t\t\t\t\t\"--cert-path\", os.path.join(d, 'cert'), # we only use the full chain\n\t\t\t\t\t\t\"--chain-path\", os.path.join(d, 'chain'), # we only use the full chain\n\t\t\t\t\t\t\"--fullchain-path\", cert_file,\n\n\t\t\t\t\t\t\"--webroot\", \"--webroot-path\", webroot,\n\n\t\t\t\t\t\t\"--config-dir\", account_path,\n\t\t\t\t\t\t#\"--staging\",\n\t\t\t\t\t], stderr=subprocess.STDOUT).decode(\"utf8\")\n\t\t\t\t\tinstall_cert_copy_file(cert_file, env)\n\n\t\t\tret[-1][\"log\"].append(certbotret)\n\t\t\tret[-1][\"result\"] = \"installed\"\n\t\texcept subprocess.CalledProcessError as e:\n\t\t\tret[-1][\"log\"].append(e.output.decode(\"utf8\"))\n\t\t\tret[-1][\"result\"] = \"error\"\n\t\texcept Exception as e:\n\t\t\tret[-1][\"log\"].append(str(e))\n\t\t\tret[-1][\"result\"] = \"error\"\n\n\t# Run post-install steps.\n\tret.extend(post_install_func(env))\n\n\t# Return what happened with each certificate request.\n\treturn ret\n\ndef provision_certificates_cmdline():\n\timport sys\n\tfrom exclusiveprocess import Lock\n\n\tfrom utils import load_environment\n\n\tLock(die=True).forever()\n\tenv = load_environment()\n\n\tquiet = False\n\tdomains = []\n\n\tfor arg in sys.argv[1:]:\n\t\tif arg == \"-q\":\n\t\t\tquiet = True\n\t\telse:\n\t\t\tdomains.append(arg)\n\n\t# Go.\n\tstatus = provision_certificates(env, limit_domains=domains)\n\n\t# Show what happened.\n\tfor request in status:\n\t\tif isinstance(request, str):\n\t\t\tprint(request)\n\t\telse:\n\t\t\tif quiet and request['result'] == 'skipped':\n\t\t\t\tcontinue\n\t\t\tprint(request['result'] + \":\", \", \".join(request['domains']) + \":\")\n\t\t\tfor line in request[\"log\"]:\n\t\t\t\tprint(line)\n\t\t\tprint()\n\n\n# INSTALLING A NEW CERTIFICATE FROM THE CONTROL PANEL\n\ndef create_csr(domain, ssl_key, country_code, env):\n\treturn shell(\"check_output\", [\n\t\t\t\t\"openssl\", \"req\", \"-new\",\n\t\t\t\t\"-key\", ssl_key,\n\t\t\t\t\"-sha256\",\n\t\t\t\t\"-subj\", f\"/C={country_code}/CN={domain}\"])\n\ndef install_cert(domain, ssl_cert, ssl_chain, env, raw=False):\n\t# Write the combined cert+chain to a temporary path and validate that it is OK.\n\t# The certificate always goes above the chain.\n\timport tempfile\n\tfd, fn = tempfile.mkstemp('.pem')\n\tos.write(fd, (ssl_cert + '\\n' + ssl_chain).encode(\"ascii\"))\n\tos.close(fd)\n\n\t# Do validation on the certificate before installing it.\n\tssl_private_key = os.path.join(os.path.join(env[\"STORAGE_ROOT\"], 'ssl', 'ssl_private_key.pem'))\n\tcert_status, cert_status_details = check_certificate(domain, fn, ssl_private_key)\n\tif cert_status != \"OK\":\n\t\tif cert_status == \"SELF-SIGNED\":\n\t\t\tcert_status = \"This is a self-signed certificate. I can't install that.\"\n\t\tos.unlink(fn)\n\t\tif cert_status_details is not None:\n\t\t\tcert_status += \" \" + cert_status_details\n\t\treturn cert_status\n\n\t# Copy certificate into ssl directory.\n\tinstall_cert_copy_file(fn, env)\n\n\t# Run post-install steps.\n\tret = post_install_func(env)\n\tif raw: return ret\n\treturn \"\\n\".join(ret)\n\n\ndef install_cert_copy_file(fn, env):\n\t# Where to put it?\n\t# Make a unique path for the certificate.\n\tfrom cryptography.hazmat.primitives import hashes\n\tfrom binascii import hexlify\n\tcert = load_pem(load_cert_chain(fn)[0])\n\t_all_domains, cn = get_certificate_domains(cert)\n\tpath = \"{}-{}-{}.pem\".format(\n\t\tsafe_domain_name(cn), # common name, which should be filename safe because it is IDNA-encoded, but in case of a malformed cert make sure it's ok to use as a filename\n\t\tcert.not_valid_after.date().isoformat().replace(\"-\", \"\"), # expiration date\n\t\thexlify(cert.fingerprint(hashes.SHA256())).decode(\"ascii\")[0:8], # fingerprint prefix\n\t\t)\n\tssl_certificate = os.path.join(os.path.join(env[\"STORAGE_ROOT\"], 'ssl', path))\n\n\t# Install the certificate.\n\tos.makedirs(os.path.dirname(ssl_certificate), exist_ok=True)\n\tshutil.move(fn, ssl_certificate)\n\n\ndef post_install_func(env):\n\tret = []\n\n\t# Get the certificate to use for PRIMARY_HOSTNAME.\n\tssl_certificates = get_ssl_certificates(env)\n\tcert = get_domain_ssl_files(env['PRIMARY_HOSTNAME'], ssl_certificates, env, use_main_cert=False)\n\tif not cert:\n\t\t# Ruh-row, we don't have any certificate usable\n\t\t# for the primary hostname.\n\t\tret.append(\"there is no valid certificate for \" + env['PRIMARY_HOSTNAME'])\n\n\t# Symlink the best cert for PRIMARY_HOSTNAME to the system\n\t# certificate path, which is hard-coded for various purposes, and then\n\t# restart postfix and dovecot.\n\tsystem_ssl_certificate = os.path.join(os.path.join(env[\"STORAGE_ROOT\"], 'ssl', 'ssl_certificate.pem'))\n\tif cert and os.readlink(system_ssl_certificate) != cert['certificate']:\n\t\t# Update symlink.\n\t\tret.append(\"updating primary certificate\")\n\t\tssl_certificate = cert['certificate']\n\t\tos.unlink(system_ssl_certificate)\n\t\tos.symlink(ssl_certificate, system_ssl_certificate)\n\n\t\t# Restart postfix and dovecot so they pick up the new file.\n\t\tshell('check_call', [\"/usr/sbin/service\", \"postfix\", \"restart\"])\n\t\tshell('check_call', [\"/usr/sbin/service\", \"dovecot\", \"restart\"])\n\t\tret.append(\"mail services restarted\")\n\n\t\t# The DANE TLSA record will remain valid so long as the private key\n\t\t# hasn't changed. We don't ever change the private key automatically.\n\t\t# If the user does it, they must manually update DNS.\n\n\t# Update the web configuration so nginx picks up the new certificate file.\n\tfrom web_update import do_web_update\n\tret.append( do_web_update(env) )\n\n\treturn ret\n\n# VALIDATION OF CERTIFICATES\n\ndef check_certificate(domain, ssl_certificate, ssl_private_key, warn_if_expiring_soon=10, rounded_time=False, just_check_domain=False):\n\t# Check that the ssl_certificate & ssl_private_key files are good\n\t# for the provided domain.\n\n\tfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey\n\tfrom cryptography.x509 import Certificate\n\n\t# The ssl_certificate file may contain a chain of certificates. We'll\n\t# need to split that up before we can pass anything to openssl or\n\t# parse them in Python. Parse it with the cryptography library.\n\ttry:\n\t\tssl_cert_chain = load_cert_chain(ssl_certificate)\n\t\tcert = load_pem(ssl_cert_chain[0])\n\t\tif not isinstance(cert, Certificate): raise ValueError(\"This is not a certificate file.\")\n\texcept ValueError as e:\n\t\treturn (\"There is a problem with the certificate file: %s\" % str(e), None)\n\n\t# First check that the domain name is one of the names allowed by\n\t# the certificate.\n\tif domain is not None:\n\t\tcertificate_names, _cert_primary_name = get_certificate_domains(cert)\n\n\t\t# Check that the domain appears among the acceptable names, or a wildcard\n\t\t# form of the domain name (which is a stricter check than the specs but\n\t\t# should work in normal cases).\n\t\twildcard_domain = re.sub(r\"^[^\\.]+\", \"*\", domain)\n\t\tif domain not in certificate_names and wildcard_domain not in certificate_names:\n\t\t\treturn (\"The certificate is for the wrong domain name. It is for %s.\"\n\t\t\t\t% \", \".join(sorted(certificate_names)), None)\n\n\t# Second, check that the certificate matches the private key.\n\tif ssl_private_key is not None:\n\t\ttry:\n\t\t\twith open(ssl_private_key, 'rb') as f:\n\t\t\t\tpriv_key = load_pem(f.read())\n\t\texcept ValueError as e:\n\t\t\treturn (f\"The private key file {ssl_private_key} is not a private key file: {e!s}\", None)\n\n\t\tif not isinstance(priv_key, RSAPrivateKey):\n\t\t\treturn (\"The private key file %s is not a private key file.\" % ssl_private_key, None)\n\n\t\tif priv_key.public_key().public_numbers() != cert.public_key().public_numbers():\n\t\t\treturn (\"The certificate does not correspond to the private key at %s.\" % ssl_private_key, None)\n\n\t\t# We could also use the openssl command line tool to get the modulus\n\t\t# listed in each file. The output of each command below looks like \"Modulus=XXXXX\".\n\t\t# $ openssl rsa -inform PEM -noout -modulus -in ssl_private_key\n\t\t# $ openssl x509 -in ssl_certificate -noout -modulus\n\n\t# Third, check if the certificate is self-signed. Return a special flag string.\n\tif cert.issuer == cert.subject:\n\t\treturn (\"SELF-SIGNED\", None)\n\n\t# When selecting which certificate to use for non-primary domains, we check if the primary\n\t# certificate or a www-parent-domain certificate is good for the domain. There's no need\n\t# to run extra checks beyond this point.\n\tif just_check_domain:\n\t\treturn (\"OK\", None)\n\n\t# Check that the certificate hasn't expired. The datetimes returned by the\n\t# certificate are 'naive' and in UTC. We need to get the current time in UTC.\n\timport datetime\n\tnow = datetime.datetime.utcnow()\n\tif not(cert.not_valid_before <= now <= cert.not_valid_after):\n\t\treturn (f\"The certificate has expired or is not yet valid. It is valid from {cert.not_valid_before} to {cert.not_valid_after}.\", None)\n\n\t# Next validate that the certificate is valid. This checks whether the certificate\n\t# is self-signed, that the chain of trust makes sense, that it is signed by a CA\n\t# that Ubuntu has installed on this machine's list of CAs, and I think that it hasn't\n\t# expired.\n\n\t# The certificate chain has to be passed separately and is given via STDIN.\n\t# This command returns a non-zero exit status in most cases, so trap errors.\n\tretcode, verifyoutput = shell('check_output', [\n\t\t\"openssl\",\n\t\t\"verify\", \"-verbose\",\n\t\t\"-purpose\", \"sslserver\", \"-policy_check\",]\n\t\t+ ([] if len(ssl_cert_chain) == 1 else [\"-untrusted\", \"/proc/self/fd/0\"])\n\t\t+ [ssl_certificate],\n\t\tinput=b\"\\n\\n\".join(ssl_cert_chain[1:]),\n\t\ttrap=True)\n\n\tif \"self signed\" in verifyoutput:\n\t\t# Certificate is self-signed. Probably we detected this above.\n\t\treturn (\"SELF-SIGNED\", None)\n\n\telif retcode != 0:\n\t\tif \"unable to get local issuer certificate\" in verifyoutput:\n\t\t\treturn (\"The certificate is missing an intermediate chain or the intermediate chain is incorrect or incomplete. (%s)\" % verifyoutput, None)\n\n\t\t# There is some unknown problem. Return the `openssl verify` raw output.\n\t\treturn (\"There is a problem with the certificate.\", verifyoutput.strip())\n\n\telse:\n\t\t# `openssl verify` returned a zero exit status so the cert is currently\n\t\t# good.\n\n\t\t# But is it expiring soon?\n\t\tcert_expiration_date = cert.not_valid_after\n\t\tndays = (cert_expiration_date-now).days\n\t\tif not rounded_time or ndays <= 10:\n\t\t\t# Yikes better renew soon!\n\t\t\texpiry_info = \"The certificate expires in %d days on %s.\" % (ndays, cert_expiration_date.date().isoformat())\n\t\telse:\n\t\t\t# We'll renew it with Lets Encrypt.\n\t\t\texpiry_info = \"The certificate expires on %s.\" % cert_expiration_date.date().isoformat()\n\n\t\tif warn_if_expiring_soon and ndays <= warn_if_expiring_soon:\n\t\t\t# Warn on day 10 to give 4 days for us to automatically renew the\n\t\t\t# certificate, which occurs on day 14.\n\t\t\treturn (\"The certificate is expiring soon: \" + expiry_info, None)\n\n\t\t# Return the special OK code.\n\t\treturn (\"OK\", expiry_info)\n\ndef load_cert_chain(pemfile):\n\t# A certificate .pem file may contain a chain of certificates.\n\t# Load the file and split them apart.\n\tre_pem = rb\"(-+BEGIN (?:.+)-+[\\r\\n]+(?:[A-Za-z0-9+/=]{1,64}[\\r\\n]+)+-+END (?:.+)-+[\\r\\n]+)\"\n\twith open(pemfile, \"rb\") as f:\n\t\tpem = f.read() + b\"\\n\" # ensure trailing newline\n\t\tpemblocks = re.findall(re_pem, pem)\n\t\tif len(pemblocks) == 0:\n\t\t\tmsg = \"File does not contain valid PEM data.\"\n\t\t\traise ValueError(msg)\n\t\treturn pemblocks\n\ndef load_pem(pem):\n\t# Parse a \"---BEGIN .... END---\" PEM string and return a Python object for it\n\t# using classes from the cryptography package.\n\tfrom cryptography.x509 import load_pem_x509_certificate\n\tfrom cryptography.hazmat.primitives import serialization\n\tfrom cryptography.hazmat.backends import default_backend\n\tpem_type = re.match(b\"-+BEGIN (.*?)-+[\\r\\n]\", pem)\n\tif pem_type is None:\n\t\tmsg = \"File is not a valid PEM-formatted file.\"\n\t\traise ValueError(msg)\n\tpem_type = pem_type.group(1)\n\tif pem_type in {b\"RSA PRIVATE KEY\", b\"PRIVATE KEY\"}:\n\t\treturn serialization.load_pem_private_key(pem, password=None, backend=default_backend())\n\tif pem_type == b\"CERTIFICATE\":\n\t\treturn load_pem_x509_certificate(pem, default_backend())\n\traise ValueError(\"Unsupported PEM object type: \" + pem_type.decode(\"ascii\", \"replace\"))\n\ndef get_certificate_domains(cert):\n\tfrom cryptography.x509 import DNSName, ExtensionNotFound, OID_COMMON_NAME, OID_SUBJECT_ALTERNATIVE_NAME\n\timport idna\n\n\tnames = set()\n\tcn = None\n\n\t# The domain may be found in the Subject Common Name (CN). This comes back as an IDNA (ASCII)\n\t# string, which is the format we store domains in - so good.\n\ttry:\n\t\tcn = cert.subject.get_attributes_for_oid(OID_COMMON_NAME)[0].value\n\t\tnames.add(cn)\n\texcept IndexError:\n\t\t# No common name? Certificate is probably generated incorrectly.\n\t\t# But we'll let it error-out when it doesn't find the domain.\n\t\tpass\n\n\t# ... or be one of the Subject Alternative Names. The cryptography library handily IDNA-decodes\n\t# the names for us. We must encode back to ASCII, but wildcard certificates can't pass through\n\t# IDNA encoding/decoding so we must special-case. See https://github.com/pyca/cryptography/pull/2071.\n\tdef idna_decode_dns_name(dns_name):\n\t\tif dns_name.startswith(\"*.\"):\n\t\t\treturn \"*.\" + idna.encode(dns_name[2:]).decode('ascii')\n\t\telse:\n\t\t\treturn idna.encode(dns_name).decode('ascii')\n\n\ttry:\n\t\tsans = cert.extensions.get_extension_for_oid(OID_SUBJECT_ALTERNATIVE_NAME).value.get_values_for_type(DNSName)\n\t\tfor san in sans:\n\t\t\tnames.add(idna_decode_dns_name(san))\n\texcept ExtensionNotFound:\n\t\tpass\n\n\treturn names, cn\n\nif __name__ == \"__main__\":\n\t# Provision certificates.\n\tprovision_certificates_cmdline()\n\n\n\nFile: management/backup.py\n\n#!/usr/local/lib/mailinabox/env/bin/python\n\n# This script performs a backup of all user data:\n# 1) System services are stopped.\n# 2) STORAGE_ROOT/backup/before-backup is executed if it exists.\n# 3) An incremental encrypted backup is made using duplicity.\n# 4) The stopped services are restarted.\n# 5) STORAGE_ROOT/backup/after-backup is executed if it exists.\n\nimport os, os.path, re, datetime, sys\nimport dateutil.parser, dateutil.relativedelta, dateutil.tz\nimport rtyaml\nfrom exclusiveprocess import Lock\n\nfrom utils import load_environment, shell, wait_for_service\n\ndef backup_status(env):\n\t# If backups are disabled, return no status.\n\tconfig = get_backup_config(env)\n\tif config[\"target\"] == \"off\":\n\t\treturn { }\n\n\t# Query duplicity to get a list of all full and incremental\n\t# backups available.\n\n\tbackups = { }\n\tnow = datetime.datetime.now(dateutil.tz.tzlocal())\n\tbackup_root = os.path.join(env[\"STORAGE_ROOT\"], 'backup')\n\tbackup_cache_dir = os.path.join(backup_root, 'cache')\n\n\tdef reldate(date, ref, clip):\n\t\tif ref < date: return clip\n\t\trd = dateutil.relativedelta.relativedelta(ref, date)\n\t\tif rd.years > 1: return \"%d years, %d months\" % (rd.years, rd.months)\n\t\tif rd.years == 1: return \"%d year, %d months\" % (rd.years, rd.months)\n\t\tif rd.months > 1: return \"%d months, %d days\" % (rd.months, rd.days)\n\t\tif rd.months == 1: return \"%d month, %d days\" % (rd.months, rd.days)\n\t\tif rd.days >= 7: return \"%d days\" % rd.days\n\t\tif rd.days > 1: return \"%d days, %d hours\" % (rd.days, rd.hours)\n\t\tif rd.days == 1: return \"%d day, %d hours\" % (rd.days, rd.hours)\n\t\treturn \"%d hours, %d minutes\" % (rd.hours, rd.minutes)\n\n\t# Get duplicity collection status and parse for a list of backups.\n\tdef parse_line(line):\n\t\tkeys = line.strip().split()\n\t\tdate = dateutil.parser.parse(keys[1]).astimezone(dateutil.tz.tzlocal())\n\t\treturn {\n\t\t\t\"date\": keys[1],\n\t\t\t\"date_str\": date.strftime(\"%Y-%m-%d %X\") + \" \" + now.tzname(),\n\t\t\t\"date_delta\": reldate(date, now, \"the future?\"),\n\t\t\t\"full\": keys[0] == \"full\",\n\t\t\t\"size\": 0, # collection-status doesn't give us the size\n\t\t\t\"volumes\": int(keys[2]), # number of archive volumes for this backup (not really helpful)\n\t\t}\n\n\tcode, collection_status = shell('check_output', [\n\t\t\"/usr/bin/duplicity\",\n\t\t\"collection-status\",\n\t\t\"--archive-dir\", backup_cache_dir,\n\t\t\"--gpg-options\", \"'--cipher-algo=AES256'\",\n\t\t\"--log-fd\", \"1\",\n\t\t*get_duplicity_additional_args(env),\n\t\tget_duplicity_target_url(config)\n\t\t],\n\t\tget_duplicity_env_vars(env),\n\t\ttrap=True)\n\tif code != 0:\n\t\t# Command failed. This is likely due to an improperly configured remote\n\t\t# destination for the backups or the last backup job terminated unexpectedly.\n\t\traise Exception(\"Something is wrong with the backup: \" + collection_status)\n\tfor line in collection_status.split('\\n'):\n\t\tif line.startswith((\" full\", \" inc\")):\n\t\t\tbackup = parse_line(line)\n\t\t\tbackups[backup[\"date\"]] = backup\n\n\t# Look at the target directly to get the sizes of each of the backups. There is more than one file per backup.\n\t# Starting with duplicity in Ubuntu 18.04, \"signatures\" files have dates in their\n\t# filenames that are a few seconds off the backup date and so don't line up\n\t# with the list of backups we have. Track unmatched files so we know how much other\n\t# space is used for those.\n\tunmatched_file_size = 0\n\tfor fn, size in list_target_files(config):\n\t\tm = re.match(r\"duplicity-(full|full-signatures|(inc|new-signatures)\\.(?P\\d+T\\d+Z)\\.to)\\.(?P\\d+T\\d+Z)\\.\", fn)\n\t\tif not m: continue # not a part of a current backup chain\n\t\tkey = m.group(\"date\")\n\t\tif key in backups:\n\t\t\tbackups[key][\"size\"] += size\n\t\telse:\n\t\t\tunmatched_file_size += size\n\n\t# Ensure the rows are sorted reverse chronologically.\n\t# This is relied on by should_force_full() and the next step.\n\tbackups = sorted(backups.values(), key = lambda b : b[\"date\"], reverse=True)\n\n\t# Get the average size of incremental backups, the size of the\n\t# most recent full backup, and the date of the most recent\n\t# backup and the most recent full backup.\n\tincremental_count = 0\n\tincremental_size = 0\n\tfirst_date = None\n\tfirst_full_size = None\n\tfirst_full_date = None\n\tfor bak in backups:\n\t\tif first_date is None:\n\t\t\tfirst_date = dateutil.parser.parse(bak[\"date\"])\n\t\tif bak[\"full\"]:\n\t\t\tfirst_full_size = bak[\"size\"]\n\t\t\tfirst_full_date = dateutil.parser.parse(bak[\"date\"])\n\t\t\tbreak\n\t\tincremental_count += 1\n\t\tincremental_size += bak[\"size\"]\n\n\t# When will the most recent backup be deleted? It won't be deleted if the next\n\t# backup is incremental, because the increments rely on all past increments.\n\t# So first guess how many more incremental backups will occur until the next\n\t# full backup. That full backup frees up this one to be deleted. But, the backup\n\t# must also be at least min_age_in_days old too.\n\tdeleted_in = None\n\tif incremental_count > 0 and incremental_size > 0 and first_full_size is not None:\n\t\t# How many days until the next incremental backup? First, the part of\n\t\t# the algorithm based on increment sizes:\n\t\test_days_to_next_full = (.5 * first_full_size - incremental_size) / (incremental_size/incremental_count)\n\t\test_time_of_next_full = first_date + datetime.timedelta(days=est_days_to_next_full)\n\n\t\t# ...And then the part of the algorithm based on full backup age:\n\t\test_time_of_next_full = min(est_time_of_next_full, first_full_date + datetime.timedelta(days=config[\"min_age_in_days\"]*10+1))\n\n\t\t# It still can't be deleted until it's old enough.\n\t\test_deleted_on = max(est_time_of_next_full, first_date + datetime.timedelta(days=config[\"min_age_in_days\"]))\n\n\t\tdeleted_in = \"approx. %d days\" % round((est_deleted_on-now).total_seconds()/60/60/24 + .5)\n\n\t# When will a backup be deleted? Set the deleted_in field of each backup.\n\tsaw_full = False\n\tfor bak in backups:\n\t\tif deleted_in:\n\t\t\t# The most recent increment in a chain and all of the previous backups\n\t\t\t# it relies on are deleted at the same time.\n\t\t\tbak[\"deleted_in\"] = deleted_in\n\t\tif bak[\"full\"]:\n\t\t\t# Reset when we get to a full backup. A new chain start *next*.\n\t\t\tsaw_full = True\n\t\t\tdeleted_in = None\n\t\telif saw_full and not deleted_in:\n\t\t\t# We're now on backups prior to the most recent full backup. These are\n\t\t\t# free to be deleted as soon as they are min_age_in_days old.\n\t\t\tdeleted_in = reldate(now, dateutil.parser.parse(bak[\"date\"]) + datetime.timedelta(days=config[\"min_age_in_days\"]), \"on next daily backup\")\n\t\t\tbak[\"deleted_in\"] = deleted_in\n\n\treturn {\n\t\t\"backups\": backups,\n\t\t\"unmatched_file_size\": unmatched_file_size,\n\t}\n\ndef should_force_full(config, env):\n\t# Force a full backup when the total size of the increments\n\t# since the last full backup is greater than half the size\n\t# of that full backup.\n\tinc_size = 0\n\tfor bak in backup_status(env)[\"backups\"]:\n\t\tif not bak[\"full\"]:\n\t\t\t# Scan through the incremental backups cumulating\n\t\t\t# size...\n\t\t\tinc_size += bak[\"size\"]\n\t\telse:\n\t\t\t# ...until we reach the most recent full backup.\n\t\t\t# Return if we should to a full backup, which is based\n\t\t\t# on the size of the increments relative to the full\n\t\t\t# backup, as well as the age of the full backup.\n\t\t\tif inc_size > .5*bak[\"size\"]:\n\t\t\t\treturn True\n\t\t\tif dateutil.parser.parse(bak[\"date\"]) + datetime.timedelta(days=config[\"min_age_in_days\"]*10+1) < datetime.datetime.now(dateutil.tz.tzlocal()):\n\t\t\t\treturn True\n\t\t\treturn False\n\telse:\n\t\t# If we got here there are no (full) backups, so make one.\n\t\t# (I love for/else blocks. Here it's just to show off.)\n\t\treturn True\n\ndef get_passphrase(env):\n\t# Get the encryption passphrase. secret_key.txt is 2048 random\n\t# bits base64-encoded and with line breaks every 65 characters.\n\t# gpg will only take the first line of text, so sanity check that\n\t# that line is long enough to be a reasonable passphrase. It\n\t# only needs to be 43 base64-characters to match AES256's key\n\t# length of 32 bytes.\n\tbackup_root = os.path.join(env[\"STORAGE_ROOT\"], 'backup')\n\twith open(os.path.join(backup_root, 'secret_key.txt'), encoding=\"utf-8\") as f:\n\t\tpassphrase = f.readline().strip()\n\tif len(passphrase) < 43: raise Exception(\"secret_key.txt's first line is too short!\")\n\n\treturn passphrase\n\ndef get_duplicity_target_url(config):\n\ttarget = config[\"target\"]\n\n\tif get_target_type(config) == \"s3\":\n\t\tfrom urllib.parse import urlsplit, urlunsplit\n\t\ttarget = list(urlsplit(target))\n\n\t\t# Although we store the S3 hostname in the target URL,\n\t\t# duplicity no longer accepts it in the target URL. The hostname in\n\t\t# the target URL must be the bucket name. The hostname is passed\n\t\t# via get_duplicity_additional_args. Move the first part of the\n\t\t# path (the bucket name) into the hostname URL component, and leave\n\t\t# the rest for the path. (The S3 region name is also stored in the\n\t\t# hostname part of the URL, in the username portion, which we also\n\t\t# have to drop here).\n\t\ttarget[1], target[2] = target[2].lstrip('/').split('/', 1)\n\n\t\ttarget = urlunsplit(target)\n\n\treturn target\n\ndef get_duplicity_additional_args(env):\n\tconfig = get_backup_config(env)\n\n\tif get_target_type(config) == 'rsync':\n\t\t# Extract a port number for the ssh transport. Duplicity accepts the\n\t\t# optional port number syntax in the target, but it doesn't appear to act\n\t\t# on it, so we set the ssh port explicitly via the duplicity options.\n\t\tfrom urllib.parse import urlsplit\n\t\ttry:\n\t\t\tport = urlsplit(config[\"target\"]).port\n\t\texcept ValueError:\n\t\t\tport = 22\n\t\tif port is None:\n\t\t\tport = 22\n\n\t\treturn [\n\t\t\tf\"--ssh-options='-i /root/.ssh/id_rsa_miab -p {port}'\",\n\t\t\tf\"--rsync-options='-e \\\"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p {port} -i /root/.ssh/id_rsa_miab\\\"'\",\n\t\t]\n\telif get_target_type(config) == 's3':\n\t\t# See note about hostname in get_duplicity_target_url.\n\t\t# The region name, which is required by some non-AWS endpoints,\n\t\t# is saved inside the username portion of the URL.\n\t\tfrom urllib.parse import urlsplit, urlunsplit\n\t\ttarget = urlsplit(config[\"target\"])\n\t\tendpoint_url = urlunsplit((\"https\", target.hostname, '', '', ''))\n\t\targs = [\"--s3-endpoint-url\", endpoint_url]\n\t\tif target.username: # region name is stuffed here\n\t\t\targs += [\"--s3-region-name\", target.username]\n\t\treturn args\n\n\treturn []\n\ndef get_duplicity_env_vars(env):\n\tconfig = get_backup_config(env)\n\n\tenv = { \"PASSPHRASE\" : get_passphrase(env) }\n\n\tif get_target_type(config) == 's3':\n\t\tenv[\"AWS_ACCESS_KEY_ID\"] = config[\"target_user\"]\n\t\tenv[\"AWS_SECRET_ACCESS_KEY\"] = config[\"target_pass\"]\n\n\treturn env\n\ndef get_target_type(config):\n\treturn config[\"target\"].split(\":\")[0]\n\ndef perform_backup(full_backup):\n\tenv = load_environment()\n\n\t# Create an global exclusive lock so that the backup script\n\t# cannot be run more than one.\n\tLock(die=True).forever()\n\n\tconfig = get_backup_config(env)\n\tbackup_root = os.path.join(env[\"STORAGE_ROOT\"], 'backup')\n\tbackup_cache_dir = os.path.join(backup_root, 'cache')\n\tbackup_dir = os.path.join(backup_root, 'encrypted')\n\n\t# Are backups disabled?\n\tif config[\"target\"] == \"off\":\n\t\treturn\n\n\t# On the first run, always do a full backup. Incremental\n\t# will fail. Otherwise do a full backup when the size of\n\t# the increments since the most recent full backup are\n\t# large.\n\ttry:\n\t\tfull_backup = full_backup or should_force_full(config, env)\n\texcept Exception as e:\n\t\t# This was the first call to duplicity, and there might\n\t\t# be an error already.\n\t\tprint(e)\n\t\tsys.exit(1)\n\n\t# Stop services.\n\tdef service_command(service, command, quit=None):\n\t\t# Execute silently, but if there is an error then display the output & exit.\n\t\tcode, ret = shell('check_output', [\"/usr/sbin/service\", service, command], capture_stderr=True, trap=True)\n\t\tif code != 0:\n\t\t\tprint(ret)\n\t\t\tif quit:\n\t\t\t\tsys.exit(code)\n\n\tservice_command(\"php8.0-fpm\", \"stop\", quit=True)\n\tservice_command(\"postfix\", \"stop\", quit=True)\n\tservice_command(\"dovecot\", \"stop\", quit=True)\n\tservice_command(\"postgrey\", \"stop\", quit=True)\n\n\t# Execute a pre-backup script that copies files outside the homedir.\n\t# Run as the STORAGE_USER user, not as root. Pass our settings in\n\t# environment variables so the script has access to STORAGE_ROOT.\n\tpre_script = os.path.join(backup_root, 'before-backup')\n\tif os.path.exists(pre_script):\n\t\tshell('check_call',\n\t\t\t['su', env['STORAGE_USER'], '-c', pre_script, config[\"target\"]],\n\t\t\tenv=env)\n\n\t# Run a backup of STORAGE_ROOT (but excluding the backups themselves!).\n\t# --allow-source-mismatch is needed in case the box's hostname is changed\n\t# after the first backup. See #396.\n\ttry:\n\t\tshell('check_call', [\n\t\t\t\"/usr/bin/duplicity\",\n\t\t\t\"full\" if full_backup else \"incr\",\n\t\t\t\"--verbosity\", \"warning\", \"--no-print-statistics\",\n\t\t\t\"--archive-dir\", backup_cache_dir,\n\t\t\t\"--exclude\", backup_root,\n\t\t\t\"--volsize\", \"250\",\n\t\t\t\"--gpg-options\", \"'--cipher-algo=AES256'\",\n\t\t\t\"--allow-source-mismatch\",\n\t\t\t*get_duplicity_additional_args(env),\n\t\t\tenv[\"STORAGE_ROOT\"],\n\t\t\tget_duplicity_target_url(config),\n\t\t\t],\n\t\t\tget_duplicity_env_vars(env))\n\tfinally:\n\t\t# Start services again.\n\t\tservice_command(\"postgrey\", \"start\", quit=False)\n\t\tservice_command(\"dovecot\", \"start\", quit=False)\n\t\tservice_command(\"postfix\", \"start\", quit=False)\n\t\tservice_command(\"php8.0-fpm\", \"start\", quit=False)\n\n\t# Remove old backups. This deletes all backup data no longer needed\n\t# from more than 3 days ago.\n\tshell('check_call', [\n\t\t\"/usr/bin/duplicity\",\n\t\t\"remove-older-than\",\n\t\t\"%dD\" % config[\"min_age_in_days\"],\n\t\t\"--verbosity\", \"error\",\n\t\t\"--archive-dir\", backup_cache_dir,\n\t\t\"--force\",\n\t\t*get_duplicity_additional_args(env),\n\t\tget_duplicity_target_url(config)\n\t\t],\n\t\tget_duplicity_env_vars(env))\n\n\t# From duplicity's manual:\n\t# \"This should only be necessary after a duplicity session fails or is\n\t# aborted prematurely.\"\n\t# That may be unlikely here but we may as well ensure we tidy up if\n\t# that does happen - it might just have been a poorly timed reboot.\n\tshell('check_call', [\n\t\t\"/usr/bin/duplicity\",\n\t\t\"cleanup\",\n\t\t\"--verbosity\", \"error\",\n\t\t\"--archive-dir\", backup_cache_dir,\n\t\t\"--force\",\n\t\t*get_duplicity_additional_args(env),\n\t\tget_duplicity_target_url(config)\n\t\t],\n\t\tget_duplicity_env_vars(env))\n\n\t# Change ownership of backups to the user-data user, so that the after-bcakup\n\t# script can access them.\n\tif get_target_type(config) == 'file':\n\t\tshell('check_call', [\"/bin/chown\", \"-R\", env[\"STORAGE_USER\"], backup_dir])\n\n\t# Execute a post-backup script that does the copying to a remote server.\n\t# Run as the STORAGE_USER user, not as root. Pass our settings in\n\t# environment variables so the script has access to STORAGE_ROOT.\n\tpost_script = os.path.join(backup_root, 'after-backup')\n\tif os.path.exists(post_script):\n\t\tshell('check_call',\n\t\t\t['su', env['STORAGE_USER'], '-c', post_script, config[\"target\"]],\n\t\t\tenv=env)\n\n\t# Our nightly cron job executes system status checks immediately after this\n\t# backup. Since it checks that dovecot and postfix are running, block for a\n\t# bit (maximum of 10 seconds each) to give each a chance to finish restarting\n\t# before the status checks might catch them down. See #381.\n\twait_for_service(25, True, env, 10)\n\twait_for_service(993, True, env, 10)\n\ndef run_duplicity_verification():\n\tenv = load_environment()\n\tbackup_root = os.path.join(env[\"STORAGE_ROOT\"], 'backup')\n\tconfig = get_backup_config(env)\n\tbackup_cache_dir = os.path.join(backup_root, 'cache')\n\n\tshell('check_call', [\n\t\t\"/usr/bin/duplicity\",\n\t\t\"--verbosity\", \"info\",\n\t\t\"verify\",\n\t\t\"--compare-data\",\n\t\t\"--archive-dir\", backup_cache_dir,\n\t\t\"--exclude\", backup_root,\n\t\t*get_duplicity_additional_args(env),\n\t\tget_duplicity_target_url(config),\n\t\tenv[\"STORAGE_ROOT\"],\n\t], get_duplicity_env_vars(env))\n\ndef run_duplicity_restore(args):\n\tenv = load_environment()\n\tconfig = get_backup_config(env)\n\tbackup_cache_dir = os.path.join(env[\"STORAGE_ROOT\"], 'backup', 'cache')\n\tshell('check_call', [\n\t\t\"/usr/bin/duplicity\",\n\t\t\"restore\",\n\t\t\"--archive-dir\", backup_cache_dir,\n\t\t*get_duplicity_additional_args(env),\n\t\tget_duplicity_target_url(config),\n\t\t*args],\n\t\tget_duplicity_env_vars(env))\n\ndef print_duplicity_command():\n\timport shlex\n\tenv = load_environment()\n\tconfig = get_backup_config(env)\n\tbackup_cache_dir = os.path.join(env[\"STORAGE_ROOT\"], 'backup', 'cache')\n\tfor k, v in get_duplicity_env_vars(env).items():\n\t\tprint(f\"export {k}={shlex.quote(v)}\")\n\tprint(\"duplicity\", \"{command}\", shlex.join([\n\t\t\"--archive-dir\", backup_cache_dir,\n\t\t*get_duplicity_additional_args(env),\n\t\tget_duplicity_target_url(config)\n\t\t]))\n\ndef list_target_files(config):\n\timport urllib.parse\n\ttry:\n\t\ttarget = urllib.parse.urlparse(config[\"target\"])\n\texcept ValueError:\n\t\treturn \"invalid target\"\n\n\tif target.scheme == \"file\":\n\t\treturn [(fn, os.path.getsize(os.path.join(target.path, fn))) for fn in os.listdir(target.path)]\n\n\telif target.scheme == \"rsync\":\n\t\trsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)')\n\t\trsync_target = '{host}:{path}'\n\n\t\t# Strip off any trailing port specifier because it's not valid in rsync's\n\t\t# DEST syntax. Explicitly set the port number for the ssh transport.\n\t\tuser_host, *_ = target.netloc.rsplit(':', 1)\n\t\ttry:\n\t\t\tport = target.port\n\t\texcept ValueError:\n\t\t\t port = 22\n\t\tif port is None:\n\t\t\tport = 22\n\n\t\ttarget_path = target.path\n\t\tif not target_path.endswith('/'):\n\t\t\ttarget_path = target_path + '/'\n\t\tif target_path.startswith('/'):\n\t\t\ttarget_path = target_path[1:]\n\n\t\trsync_command = [ 'rsync',\n\t\t\t\t\t'-e',\n\t\t\t\t\tf'/usr/bin/ssh -i /root/.ssh/id_rsa_miab -oStrictHostKeyChecking=no -oBatchMode=yes -p {port}',\n\t\t\t\t\t'--list-only',\n\t\t\t\t\t'-r',\n\t\t\t\t\trsync_target.format(\n\t\t\t\t\t\thost=user_host,\n\t\t\t\t\t\tpath=target_path)\n\t\t\t\t]\n\n\t\tcode, listing = shell('check_output', rsync_command, trap=True, capture_stderr=True)\n\t\tif code == 0:\n\t\t\tret = []\n\t\t\tfor l in listing.split('\\n'):\n\t\t\t\tmatch = rsync_fn_size_re.match(l)\n\t\t\t\tif match:\n\t\t\t\t\tret.append( (match.groups()[1], int(match.groups()[0].replace(',',''))) )\n\t\t\treturn ret\n\t\telse:\n\t\t\tif 'Permission denied (publickey).' in listing:\n\t\t\t\treason = \"Invalid user or check you correctly copied the SSH key.\"\n\t\t\telif 'No such file or directory' in listing:\n\t\t\t\treason = f\"Provided path {target_path} is invalid.\"\n\t\t\telif 'Network is unreachable' in listing:\n\t\t\t\treason = f\"The IP address {target.hostname} is unreachable.\"\n\t\t\telif 'Could not resolve hostname' in listing:\n\t\t\t\treason = f\"The hostname {target.hostname} cannot be resolved.\"\n\t\t\telse:\n\t\t\t\treason = (\"Unknown error.\"\n\t\t\t\t\t\t\"Please check running 'management/backup.py --verify'\"\n\t\t\t\t\t\t\"from mailinabox sources to debug the issue.\")\n\t\t\tmsg = f\"Connection to rsync host failed: {reason}\"\n\t\t\traise ValueError(msg)\n\n\telif target.scheme == \"s3\":\n\t\timport boto3.s3\n\t\tfrom botocore.exceptions import ClientError\n\n\t\t# separate bucket from path in target\n\t\tbucket = target.path[1:].split('/')[0]\n\t\tpath = '/'.join(target.path[1:].split('/')[1:]) + '/'\n\n\t\t# If no prefix is specified, set the path to '', otherwise boto won't list the files\n\t\tif path == '/':\n\t\t\tpath = ''\n\n\t\tif bucket == \"\":\n\t\t\tmsg = \"Enter an S3 bucket name.\"\n\t\t\traise ValueError(msg)\n\n\t\t# connect to the region & bucket\n\t\ttry:\n\t\t\ts3 = boto3.client('s3', \\\n\t\t\t\tendpoint_url=f'https://{target.hostname}', \\\n\t\t\t\taws_access_key_id=config['target_user'], \\\n\t\t\t\taws_secret_access_key=config['target_pass'])\n\t\t\tbucket_objects = s3.list_objects_v2(Bucket=bucket, Prefix=path)['Contents']\n\t\t\tbackup_list = [(key['Key'][len(path):], key['Size']) for key in bucket_objects]\n\t\texcept ClientError as e:\n\t\t\traise ValueError(e)\n\t\treturn backup_list\n\telif target.scheme == 'b2':\n\t\tfrom b2sdk.v1 import InMemoryAccountInfo, B2Api\n\t\tfrom b2sdk.v1.exception import NonExistentBucket\n\t\tinfo = InMemoryAccountInfo()\n\t\tb2_api = B2Api(info)\n\n\t\t# Extract information from target\n\t\tb2_application_keyid = target.netloc[:target.netloc.index(':')]\n\t\tb2_application_key = urllib.parse.unquote(target.netloc[target.netloc.index(':')+1:target.netloc.index('@')])\n\t\tb2_bucket = target.netloc[target.netloc.index('@')+1:]\n\n\t\ttry:\n\t\t\tb2_api.authorize_account(\"production\", b2_application_keyid, b2_application_key)\n\t\t\tbucket = b2_api.get_bucket_by_name(b2_bucket)\n\t\texcept NonExistentBucket:\n\t\t\tmsg = \"B2 Bucket does not exist. Please double check your information!\"\n\t\t\traise ValueError(msg)\n\t\treturn [(key.file_name, key.size) for key, _ in bucket.ls()]\n\n\telse:\n\t\traise ValueError(config[\"target\"])\n\n\ndef backup_set_custom(env, target, target_user, target_pass, min_age):\n\tconfig = get_backup_config(env, for_save=True)\n\n\t# min_age must be an int\n\tif isinstance(min_age, str):\n\t\tmin_age = int(min_age)\n\n\tconfig[\"target\"] = target\n\tconfig[\"target_user\"] = target_user\n\tconfig[\"target_pass\"] = target_pass\n\tconfig[\"min_age_in_days\"] = min_age\n\n\t# Validate.\n\ttry:\n\t\tif config[\"target\"] not in {\"off\", \"local\"}:\n\t\t\t# these aren't supported by the following function, which expects a full url in the target key,\n\t\t\t# which is what is there except when loading the config prior to saving\n\t\t\tlist_target_files(config)\n\texcept ValueError as e:\n\t\treturn str(e)\n\n\twrite_backup_config(env, config)\n\n\treturn \"OK\"\n\ndef get_backup_config(env, for_save=False, for_ui=False):\n\tbackup_root = os.path.join(env[\"STORAGE_ROOT\"], 'backup')\n\n\t# Defaults.\n\tconfig = {\n\t\t\"min_age_in_days\": 3,\n\t\t\"target\": \"local\",\n\t}\n\n\t# Merge in anything written to custom.yaml.\n\ttry:\n\t\twith open(os.path.join(backup_root, 'custom.yaml'), encoding=\"utf-8\") as f:\n\t\t\tcustom_config = rtyaml.load(f)\n\t\tif not isinstance(custom_config, dict): raise ValueError # caught below\n\t\tconfig.update(custom_config)\n\texcept:\n\t\tpass\n\n\t# When updating config.yaml, don't do any further processing on what we find.\n\tif for_save:\n\t\treturn config\n\n\t# When passing this back to the admin to show the current settings, do not include\n\t# authentication details. The user will have to re-enter it.\n\tif for_ui:\n\t\tfor field in (\"target_user\", \"target_pass\"):\n\t\t\tif field in config:\n\t\t\t\tdel config[field]\n\n\t# helper fields for the admin\n\tconfig[\"file_target_directory\"] = os.path.join(backup_root, 'encrypted')\n\tconfig[\"enc_pw_file\"] = os.path.join(backup_root, 'secret_key.txt')\n\tif config[\"target\"] == \"local\":\n\t\t# Expand to the full URL.\n\t\tconfig[\"target\"] = \"file://\" + config[\"file_target_directory\"]\n\tssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub')\n\tif os.path.exists(ssh_pub_key):\n\t\twith open(ssh_pub_key, encoding=\"utf-8\") as f:\n\t\t\tconfig[\"ssh_pub_key\"] = f.read()\n\n\treturn config\n\ndef write_backup_config(env, newconfig):\n\tbackup_root = os.path.join(env[\"STORAGE_ROOT\"], 'backup')\n\twith open(os.path.join(backup_root, 'custom.yaml'), \"w\", encoding=\"utf-8\") as f:\n\t\tf.write(rtyaml.dump(newconfig))\n\nif __name__ == \"__main__\":\n\tif sys.argv[-1] == \"--verify\":\n\t\t# Run duplicity's verification command to check a) the backup files\n\t\t# are readable, and b) report if they are up to date.\n\t\trun_duplicity_verification()\n\n\telif sys.argv[-1] == \"--list\":\n\t\t# List the saved backup files.\n\t\tfor fn, size in list_target_files(get_backup_config(load_environment())):\n\t\t\tprint(f\"{fn}\\t{size}\")\n\n\telif sys.argv[-1] == \"--status\":\n\t\t# Show backup status.\n\t\tret = backup_status(load_environment())\n\t\tprint(rtyaml.dump(ret[\"backups\"]))\n\t\tprint(\"Storage for unmatched files:\", ret[\"unmatched_file_size\"])\n\n\telif len(sys.argv) >= 2 and sys.argv[1] == \"--restore\":\n\t\t# Run duplicity restore. Rest of command line passed as arguments\n\t\t# to duplicity. The restore path should be specified.\n\t\trun_duplicity_restore(sys.argv[2:])\n\n\telif sys.argv[-1] == \"--duplicity-command\":\n\t\tprint_duplicity_command()\n\n\telse:\n\t\t# Perform a backup. Add --full to force a full backup rather than\n\t\t# possibly performing an incremental backup.\n\t\tfull_backup = \"--full\" in sys.argv\n\t\tperform_backup(full_backup)\n\n\n\nFile: management/status_checks.py\n\n#!/usr/local/lib/mailinabox/env/bin/python\n#\n# Checks that the upstream DNS has been set correctly and that\n# TLS certificates have been signed, etc., and if not tells the user\n# what to do next.\n\nimport sys, os, os.path, re, datetime, multiprocessing.pool\nimport asyncio\n\nimport dns.reversename, dns.resolver\nimport idna\nimport psutil\nimport postfix_mta_sts_resolver.resolver\n\nfrom dns_update import get_dns_zones, build_tlsa_record, get_custom_dns_config, get_secondary_dns, get_custom_dns_records\nfrom web_update import get_web_domains, get_domains_with_a_records\nfrom ssl_certificates import get_ssl_certificates, get_domain_ssl_files, check_certificate\nfrom mailconfig import get_mail_domains, get_mail_aliases\n\nfrom utils import shell, sort_domains, load_env_vars_from_file, load_settings, get_ssh_port, get_ssh_config_value\n\ndef get_services():\n\treturn [\n\t\t{ \"name\": \"Local DNS (bind9)\", \"port\": 53, \"public\": False, },\n\t\t#{ \"name\": \"NSD Control\", \"port\": 8952, \"public\": False, },\n\t\t{ \"name\": \"Local DNS Control (bind9/rndc)\", \"port\": 953, \"public\": False, },\n\t\t{ \"name\": \"Dovecot LMTP LDA\", \"port\": 10026, \"public\": False, },\n\t\t{ \"name\": \"Postgrey\", \"port\": 10023, \"public\": False, },\n\t\t{ \"name\": \"Spamassassin\", \"port\": 10025, \"public\": False, },\n\t\t{ \"name\": \"OpenDKIM\", \"port\": 8891, \"public\": False, },\n\t\t{ \"name\": \"OpenDMARC\", \"port\": 8893, \"public\": False, },\n\t\t{ \"name\": \"Mail-in-a-Box Management Daemon\", \"port\": 10222, \"public\": False, },\n\t\t{ \"name\": \"SSH Login (ssh)\", \"port\": get_ssh_port(), \"public\": True, },\n\t\t{ \"name\": \"Public DNS (nsd4)\", \"port\": 53, \"public\": True, },\n\t\t{ \"name\": \"Incoming Mail (SMTP/postfix)\", \"port\": 25, \"public\": True, },\n\t\t{ \"name\": \"Outgoing Mail (SMTP 465/postfix)\", \"port\": 465, \"public\": True, },\n\t\t{ \"name\": \"Outgoing Mail (SMTP 587/postfix)\", \"port\": 587, \"public\": True, },\n\t\t#{ \"name\": \"Postfix/master\", \"port\": 10587, \"public\": True, },\n\t\t{ \"name\": \"IMAPS (dovecot)\", \"port\": 993, \"public\": True, },\n\t\t{ \"name\": \"Mail Filters (Sieve/dovecot)\", \"port\": 4190, \"public\": True, },\n\t\t{ \"name\": \"HTTP Web (nginx)\", \"port\": 80, \"public\": True, },\n\t\t{ \"name\": \"HTTPS Web (nginx)\", \"port\": 443, \"public\": True, },\n\t]\n\ndef run_checks(rounded_values, env, output, pool, domains_to_check=None):\n\t# run systems checks\n\toutput.add_heading(\"System\")\n\n\t# check that services are running\n\tif not run_services_checks(env, output, pool):\n\t\t# If critical services are not running, stop. If bind9 isn't running,\n\t\t# all later DNS checks will timeout and that will take forever to\n\t\t# go through, and if running over the web will cause a fastcgi timeout.\n\t\treturn\n\n\t# clear bind9's DNS cache so our DNS checks are up to date\n\t# (ignore errors; if bind9/rndc isn't running we'd already report\n\t# that in run_services checks.)\n\tshell('check_call', [\"/usr/sbin/rndc\", \"flush\"], trap=True)\n\n\trun_system_checks(rounded_values, env, output)\n\n\t# perform other checks asynchronously\n\n\trun_network_checks(env, output)\n\trun_domain_checks(rounded_values, env, output, pool, domains_to_check=domains_to_check)\n\ndef run_services_checks(env, output, pool):\n\t# Check that system services are running.\n\tall_running = True\n\tfatal = False\n\tret = pool.starmap(check_service, ((i, service, env) for i, service in enumerate(get_services())), chunksize=1)\n\tfor _i, running, fatal2, output2 in sorted(ret):\n\t\tif output2 is None: continue # skip check (e.g. no port was set, e.g. no sshd)\n\t\tall_running = all_running and running\n\t\tfatal = fatal or fatal2\n\t\toutput2.playback(output)\n\n\t# Check fail2ban.\n\tcode, ret = shell('check_output', [\"fail2ban-client\", \"status\"], capture_stderr=True, trap=True)\n\tif code != 0:\n\t\toutput.print_error(\"fail2ban is not running.\")\n\t\tall_running = False\n\n\tif all_running:\n\t\toutput.print_ok(\"All system services are running.\")\n\n\treturn not fatal\n\ndef check_service(i, service, env):\n\tif not service[\"port\"]:\n\t\t# Skip check (no port, e.g. no sshd).\n\t\treturn (i, None, None, None)\n\n\toutput = BufferedOutput()\n\trunning = False\n\tfatal = False\n\n\t# Helper function to make a connection to the service, since we try\n\t# up to three ways (localhost, IPv4 address, IPv6 address).\n\tdef try_connect(ip):\n\t\t# Connect to the given IP address on the service's port with a one-second timeout.\n\t\timport socket\n\t\ts = socket.socket(socket.AF_INET if \":\" not in ip else socket.AF_INET6, socket.SOCK_STREAM)\n\t\ts.settimeout(1)\n\t\ttry:\n\t\t\ts.connect((ip, service[\"port\"]))\n\t\t\treturn True\n\t\texcept OSError:\n\t\t\t# timed out or some other odd error\n\t\t\treturn False\n\t\tfinally:\n\t\t\ts.close()\n\n\tif service[\"public\"]:\n\t\t# Service should be publicly accessible.\n\t\tif try_connect(env[\"PUBLIC_IP\"]):\n\t\t\t# IPv4 ok.\n\t\t\tif not env.get(\"PUBLIC_IPV6\") or service.get(\"ipv6\") is False or try_connect(env[\"PUBLIC_IPV6\"]):\n\t\t\t\t# No IPv6, or service isn't meant to run on IPv6, or IPv6 is good.\n\t\t\t\trunning = True\n\n\t\t\t# IPv4 ok but IPv6 failed. Try the PRIVATE_IPV6 address to see if the service is bound to the interface.\n\t\t\telif service[\"port\"] != 53 and try_connect(env[\"PRIVATE_IPV6\"]):\n\t\t\t\toutput.print_error(\"%s is running (and available over IPv4 and the local IPv6 address), but it is not publicly accessible at %s:%d.\" % (service['name'], env['PUBLIC_IPV6'], service['port']))\n\t\t\telse:\n\t\t\t\toutput.print_error(\"%s is running and available over IPv4 but is not accessible over IPv6 at %s port %d.\" % (service['name'], env['PUBLIC_IPV6'], service['port']))\n\n\t\t# IPv4 failed. Try the private IP to see if the service is running but not accessible (except DNS because a different service runs on the private IP).\n\t\telif service[\"port\"] != 53 and try_connect(\"127.0.0.1\"):\n\t\t\toutput.print_error(\"%s is running but is not publicly accessible at %s:%d.\" % (service['name'], env['PUBLIC_IP'], service['port']))\n\t\telse:\n\t\t\toutput.print_error(\"%s is not running (port %d).\" % (service['name'], service['port']))\n\n\t\t# Why is nginx not running?\n\t\tif not running and service[\"port\"] in {80, 443}:\n\t\t\toutput.print_line(shell('check_output', ['nginx', '-t'], capture_stderr=True, trap=True)[1].strip())\n\n\t# Service should be running locally.\n\telif try_connect(\"127.0.0.1\"):\n\t\trunning = True\n\telse:\n\t\toutput.print_error(\"%s is not running (port %d).\" % (service['name'], service['port']))\n\n\t# Flag if local DNS is not running.\n\tif not running and service[\"port\"] == 53 and service[\"public\"] is False:\n\t\tfatal = True\n\n\treturn (i, running, fatal, output)\n\ndef run_system_checks(rounded_values, env, output):\n\tcheck_ssh_password(env, output)\n\tcheck_software_updates(env, output)\n\tcheck_miab_version(env, output)\n\tcheck_system_aliases(env, output)\n\tcheck_free_disk_space(rounded_values, env, output)\n\tcheck_free_memory(rounded_values, env, output)\n\ndef check_ufw(env, output):\n\tif not os.path.isfile('/usr/sbin/ufw'):\n\t\toutput.print_warning(\"\"\"The ufw program was not installed. If your system is able to run iptables, rerun the setup.\"\"\")\n\t\treturn\n\n\tcode, ufw = shell('check_output', ['ufw', 'status'], trap=True)\n\n\tif code != 0:\n\t\t# The command failed, it's safe to say the firewall is disabled\n\t\toutput.print_warning(\"\"\"The firewall is not working on this machine. An error was received\n\t\t\t\t\twhile trying to check the firewall. To investigate run 'sudo ufw status'.\"\"\")\n\t\treturn\n\n\tufw = ufw.splitlines()\n\tif ufw[0] == \"Status: active\":\n\t\tnot_allowed_ports = 0\n\t\tfor service in get_services():\n\t\t\tif service[\"public\"] and not is_port_allowed(ufw, service[\"port\"]):\n\t\t\t\tnot_allowed_ports += 1\n\t\t\t\toutput.print_error(\"Port {} ({}) should be allowed in the firewall, please re-run the setup.\".format(service[\"port\"], service[\"name\"]))\n\n\t\tif not_allowed_ports == 0:\n\t\t\toutput.print_ok(\"Firewall is active.\")\n\telse:\n\t\toutput.print_warning(\"\"\"The firewall is disabled on this machine. This might be because the system\n\t\t\tis protected by an external firewall. We can't protect the system against bruteforce attacks\n\t\t\twithout the local firewall active. Connect to the system via ssh and try to run: ufw enable.\"\"\")\n\ndef is_port_allowed(ufw, port):\n\treturn any(re.match(str(port) +\"[/ \\t].*\", item) for item in ufw)\n\ndef check_ssh_password(env, output):\n\tconfig_value = get_ssh_config_value(\"passwordauthentication\")\n\tif config_value:\n\t\tif config_value == \"no\":\n\t\t\toutput.print_ok(\"SSH disallows password-based login.\")\n\t\telse:\n\t\t\toutput.print_error(\"\"\"The SSH server on this machine permits password-based login. A more secure\n\t\t\t\tway to log in is using a public key. Add your SSH public key to $HOME/.ssh/authorized_keys, check\n\t\t\t\tthat you can log in without a password, set the option 'PasswordAuthentication no' in\n\t\t\t\t/etc/ssh/sshd_config, and then restart the openssh via 'sudo service ssh restart'.\"\"\")\n\ndef is_reboot_needed_due_to_package_installation():\n\treturn os.path.exists(\"/var/run/reboot-required\")\n\ndef check_software_updates(env, output):\n\t# Check for any software package updates.\n\tpkgs = list_apt_updates(apt_update=False)\n\tif is_reboot_needed_due_to_package_installation():\n\t\toutput.print_error(\"System updates have been installed and a reboot of the machine is required.\")\n\telif len(pkgs) == 0:\n\t\toutput.print_ok(\"System software is up to date.\")\n\telse:\n\t\toutput.print_error(\"There are %d software packages that can be updated.\" % len(pkgs))\n\t\tfor p in pkgs:\n\t\t\toutput.print_line(\"{} ({})\".format(p[\"package\"], p[\"version\"]))\n\ndef check_system_aliases(env, output):\n\t# Check that the administrator alias exists since that's where all\n\t# admin email is automatically directed.\n\tcheck_alias_exists(\"System administrator address\", \"administrator@\" + env['PRIMARY_HOSTNAME'], env, output)\n\ndef check_free_disk_space(rounded_values, env, output):\n\t# Check free disk space.\n\tst = os.statvfs(env['STORAGE_ROOT'])\n\tbytes_total = st.f_blocks * st.f_frsize\n\tbytes_free = st.f_bavail * st.f_frsize\n\tdisk_msg = \"The disk has %.2f GB space remaining.\" % (bytes_free/1024.0/1024.0/1024.0)\n\tif bytes_free > .3 * bytes_total:\n\t\tif rounded_values: disk_msg = \"The disk has more than 30% free space.\"\n\t\toutput.print_ok(disk_msg)\n\telif bytes_free > .15 * bytes_total:\n\t\tif rounded_values: disk_msg = \"The disk has less than 30% free space.\"\n\t\toutput.print_warning(disk_msg)\n\telse:\n\t\tif rounded_values: disk_msg = \"The disk has less than 15% free space.\"\n\t\toutput.print_error(disk_msg)\n\n\t# Check that there's only one duplicity cache. If there's more than one,\n\t# it's probably no longer in use, and we can recommend clearing the cache\n\t# to save space. The cache directory may not exist yet, which is OK.\n\tbackup_cache_path = os.path.join(env['STORAGE_ROOT'], 'backup/cache')\n\ttry:\n\t\tbackup_cache_count = len(os.listdir(backup_cache_path))\n\texcept:\n\t\tbackup_cache_count = 0\n\tif backup_cache_count > 1:\n\t\toutput.print_warning(f\"The backup cache directory {backup_cache_path} has more than one backup target cache. Consider clearing this directory to save disk space.\")\n\ndef check_free_memory(rounded_values, env, output):\n\t# Check free memory.\n\tpercent_free = 100 - psutil.virtual_memory().percent\n\tmemory_msg = \"System memory is %s%% free.\" % str(round(percent_free))\n\tif percent_free >= 20:\n\t\tif rounded_values: memory_msg = \"System free memory is at least 20%.\"\n\t\toutput.print_ok(memory_msg)\n\telif percent_free >= 10:\n\t\tif rounded_values: memory_msg = \"System free memory is below 20%.\"\n\t\toutput.print_warning(memory_msg)\n\telse:\n\t\tif rounded_values: memory_msg = \"System free memory is below 10%.\"\n\t\toutput.print_error(memory_msg)\n\ndef run_network_checks(env, output):\n\t# Also see setup/network-checks.sh.\n\n\toutput.add_heading(\"Network\")\n\n\tcheck_ufw(env, output)\n\n\t# Stop if we cannot make an outbound connection on port 25. Many residential\n\t# networks block outbound port 25 to prevent their network from sending spam.\n\t# See if we can reach one of Google's MTAs with a 5-second timeout.\n\t_code, ret = shell(\"check_call\", [\"/bin/nc\", \"-z\", \"-w5\", \"aspmx.l.google.com\", \"25\"], trap=True)\n\tif ret == 0:\n\t\toutput.print_ok(\"Outbound mail (SMTP port 25) is not blocked.\")\n\telse:\n\t\toutput.print_error(\"\"\"Outbound mail (SMTP port 25) seems to be blocked by your network. You\n\t\t\twill not be able to send any mail. Many residential networks block port 25 to prevent hijacked\n\t\t\tmachines from being able to send spam. A quick connection test to Google's mail server on port 25\n\t\t\tfailed.\"\"\")\n\n\t# Stop if the IPv4 address is listed in the ZEN Spamhaus Block List.\n\t# The user might have ended up on an IP address that was previously in use\n\t# by a spammer, or the user may be deploying on a residential network. We\n\t# will not be able to reliably send mail in these cases.\n\t\n\t# See https://www.spamhaus.org/news/article/807/using-our-public-mirrors-check-your-return-codes-now. for\n\t# information on spamhaus return codes\n\trev_ip4 = \".\".join(reversed(env['PUBLIC_IP'].split('.')))\n\tzen = query_dns(rev_ip4+'.zen.spamhaus.org', 'A', nxdomain=None)\n\tif zen is None:\n\t\toutput.print_ok(\"IP address is not blacklisted by zen.spamhaus.org.\")\n\telif zen == \"[timeout]\":\n\t\toutput.print_warning(\"Connection to zen.spamhaus.org timed out. Could not determine whether this box's IP address is blacklisted. Please try again later.\")\n\telif zen == \"[Not Set]\":\n\t\toutput.print_warning(\"Could not connect to zen.spamhaus.org. Could not determine whether this box's IP address is blacklisted. Please try again later.\")\n\telif zen == \"127.255.255.252\":\n\t\toutput.print_warning(\"Incorrect spamhaus query: %s. Could not determine whether this box's IP address is blacklisted.\" % (rev_ip4+'.zen.spamhaus.org'))\n\telif zen == \"127.255.255.254\":\n\t\toutput.print_warning(\"Mail-in-a-Box is configured to use a public DNS server. This is not supported by spamhaus. Could not determine whether this box's IP address is blacklisted.\")\n\telif zen == \"127.255.255.255\":\n\t\toutput.print_warning(\"Too many queries have been performed on the spamhaus server. Could not determine whether this box's IP address is blacklisted.\")\n\telse:\n\t\toutput.print_error(\"\"\"The IP address of this machine {} is listed in the Spamhaus Block List (code {}),\n\t\t\twhich may prevent recipients from receiving your email. See http://www.spamhaus.org/query/ip/{}.\"\"\".format(env['PUBLIC_IP'], zen, env['PUBLIC_IP']))\n\ndef run_domain_checks(rounded_time, env, output, pool, domains_to_check=None):\n\t# Get the list of domains we handle mail for.\n\tmail_domains = get_mail_domains(env)\n\n\t# Get the list of domains we serve DNS zones for (i.e. does not include subdomains).\n\tdns_zonefiles = dict(get_dns_zones(env))\n\tdns_domains = set(dns_zonefiles)\n\n\t# Get the list of domains we serve HTTPS for.\n\tweb_domains = set(get_web_domains(env))\n\n\tif domains_to_check is None:\n\t\tdomains_to_check = mail_domains | dns_domains | web_domains\n\n\t# Remove \"www\", \"autoconfig\", \"autodiscover\", and \"mta-sts\" subdomains, which we group with their parent,\n\t# if their parent is in the domains to check list.\n\tdomains_to_check = [\n\t\td for d in domains_to_check\n\t\tif not (\n\t\t d.split(\".\", 1)[0] in {\"www\", \"autoconfig\", \"autodiscover\", \"mta-sts\"}\n\t\t and len(d.split(\".\", 1)) == 2\n\t\t and d.split(\".\", 1)[1] in domains_to_check\n\t\t)\n\t]\n\n\t# Get the list of domains that we don't serve web for because of a custom CNAME/A record.\n\tdomains_with_a_records = get_domains_with_a_records(env)\n\n\t# Serial version:\n\t#for domain in sort_domains(domains_to_check, env):\n\t#\trun_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains)\n\n\t# Parallelize the checks across a worker pool.\n\targs = ((domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records)\n\t\tfor domain in domains_to_check)\n\tret = pool.starmap(run_domain_checks_on_domain, args, chunksize=1)\n\tret = dict(ret) # (domain, output) => { domain: output }\n\tfor domain in sort_domains(ret, env):\n\t\tret[domain].playback(output)\n\ndef run_domain_checks_on_domain(domain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records):\n\toutput = BufferedOutput()\n\n\t# When running inside Flask, the worker threads don't get a thread pool automatically.\n\t# Also this method is called in a forked worker pool, so creating a new loop is probably\n\t# a good idea.\n\tasyncio.set_event_loop(asyncio.new_event_loop())\n\n\t# we'd move this up, but this returns non-pickleable values\n\tssl_certificates = get_ssl_certificates(env)\n\n\t# The domain is IDNA-encoded in the database, but for display use Unicode.\n\ttry:\n\t\tdomain_display = idna.decode(domain.encode('ascii'))\n\t\toutput.add_heading(domain_display)\n\texcept (ValueError, UnicodeError, idna.IDNAError) as e:\n\t\t# Looks like we have some invalid data in our database.\n\t\toutput.add_heading(domain)\n\t\toutput.print_error(\"Domain name is invalid: \" + str(e))\n\n\tif domain == env[\"PRIMARY_HOSTNAME\"]:\n\t\tcheck_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles)\n\n\tif domain in dns_domains:\n\t\tcheck_dns_zone(domain, env, output, dns_zonefiles)\n\n\tif domain in mail_domains:\n\t\tcheck_mail_domain(domain, env, output)\n\n\tif domain in web_domains:\n\t\tcheck_web_domain(domain, rounded_time, ssl_certificates, env, output)\n\n\tif domain in dns_domains:\n\t\tcheck_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records)\n\n\t# Check auto-configured subdomains. See run_domain_checks.\n\t# Skip mta-sts because we check the policy directly.\n\tfor label in (\"www\", \"autoconfig\", \"autodiscover\"):\n\t\tsubdomain = label + \".\" + domain\n\t\tif subdomain in web_domains or subdomain in mail_domains:\n\t\t\t# Run checks.\n\t\t\tsubdomain_output = run_domain_checks_on_domain(subdomain, rounded_time, env, dns_domains, dns_zonefiles, mail_domains, web_domains, domains_with_a_records)\n\n\t\t\t# Prepend the domain name to the start of each check line, and then add to the\n\t\t\t# checks for this domain.\n\t\t\tfor attr, args, kwargs in subdomain_output[1].buf:\n\t\t\t\tif attr == \"add_heading\":\n\t\t\t\t\t# Drop the heading, but use its text as the subdomain name in\n\t\t\t\t\t# each line since it is in Unicode form.\n\t\t\t\t\tsubdomain = args[0]\n\t\t\t\t\tcontinue\n\t\t\t\tif len(args) == 1 and isinstance(args[0], str):\n\t\t\t\t\targs = [ subdomain + \": \" + args[0] ]\n\t\t\t\tgetattr(output, attr)(*args, **kwargs)\n\n\treturn (domain, output)\n\ndef check_primary_hostname_dns(domain, env, output, dns_domains, dns_zonefiles):\n\t# If a DS record is set on the zone containing this domain, check DNSSEC now.\n\thas_dnssec = False\n\tfor zone in dns_domains:\n\t\tif (zone == domain or domain.endswith(\".\" + zone)) and query_dns(zone, \"DS\", nxdomain=None) is not None:\n\t\t\thas_dnssec = True\n\t\t\tcheck_dnssec(zone, env, output, dns_zonefiles, is_checking_primary=True)\n\n\tip = query_dns(domain, \"A\")\n\tns_ips = query_dns(\"ns1.\" + domain, \"A\") + '/' + query_dns(\"ns2.\" + domain, \"A\")\n\tmy_ips = env['PUBLIC_IP'] + ((\" / \"+env['PUBLIC_IPV6']) if env.get(\"PUBLIC_IPV6\") else \"\")\n\n\t# Check that the ns1/ns2 hostnames resolve to A records. This information probably\n\t# comes from the TLD since the information is set at the registrar as glue records.\n\t# We're probably not actually checking that here but instead checking that we, as\n\t# the nameserver, are reporting the right info --- but if the glue is incorrect this\n\t# will probably fail.\n\tif ns_ips == env['PUBLIC_IP'] + '/' + env['PUBLIC_IP']:\n\t\toutput.print_ok(\"Nameserver glue records are correct at registrar. [ns1/ns2.{} ↦ {}]\".format(env['PRIMARY_HOSTNAME'], env['PUBLIC_IP']))\n\n\telif ip == env['PUBLIC_IP']:\n\t\t# The NS records are not what we expect, but the domain resolves correctly, so\n\t\t# the user may have set up external DNS. List this discrepancy as a warning.\n\t\toutput.print_warning(\"\"\"Nameserver glue records (ns1.{} and ns2.{}) should be configured at your domain name\n\t\t\tregistrar as having the IP address of this box ({}). They currently report addresses of {}. If you have set up External DNS, this may be OK.\"\"\".format(env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))\n\n\telse:\n\t\toutput.print_error(\"\"\"Nameserver glue records are incorrect. The ns1.{} and ns2.{} nameservers must be configured at your domain name\n\t\t\tregistrar as having the IP address {}. They currently report addresses of {}. It may take several hours for\n\t\t\tpublic DNS to update after a change.\"\"\".format(env['PRIMARY_HOSTNAME'], env['PRIMARY_HOSTNAME'], env['PUBLIC_IP'], ns_ips))\n\n\t# Check that PRIMARY_HOSTNAME resolves to PUBLIC_IP[V6] in public DNS.\n\tipv6 = query_dns(domain, \"AAAA\") if env.get(\"PUBLIC_IPV6\") else None\n\tif ip == env['PUBLIC_IP'] and not (ipv6 and env['PUBLIC_IPV6'] and ipv6 != normalize_ip(env['PUBLIC_IPV6'])):\n\t\toutput.print_ok(\"Domain resolves to box's IP address. [{} ↦ {}]\".format(env['PRIMARY_HOSTNAME'], my_ips))\n\telse:\n\t\toutput.print_error(\"\"\"This domain must resolve to this box's IP address ({}) in public DNS but it currently resolves\n\t\t\tto {}. It may take several hours for public DNS to update after a change. This problem may result from other\n\t\t\tissues listed above.\"\"\".format(my_ips, ip + ((\" / \" + ipv6) if ipv6 is not None else \"\")))\n\n\n\t# Check reverse DNS matches the PRIMARY_HOSTNAME. Note that it might not be\n\t# a DNS zone if it is a subdomain of another domain we have a zone for.\n\texisting_rdns_v4 = query_dns(dns.reversename.from_address(env['PUBLIC_IP']), \"PTR\")\n\texisting_rdns_v6 = query_dns(dns.reversename.from_address(env['PUBLIC_IPV6']), \"PTR\") if env.get(\"PUBLIC_IPV6\") else None\n\tif existing_rdns_v4 == domain and existing_rdns_v6 in {None, domain}:\n\t\toutput.print_ok(\"Reverse DNS is set correctly at ISP. [{} ↦ {}]\".format(my_ips, env['PRIMARY_HOSTNAME']))\n\telif existing_rdns_v4 == existing_rdns_v6 or existing_rdns_v6 is None:\n\t\toutput.print_error(f\"\"\"This box's reverse DNS is currently {existing_rdns_v4}, but it should be {domain}. Your ISP or cloud provider will have instructions\n\t\t\ton setting up reverse DNS for this box.\"\"\" )\n\telse:\n\t\toutput.print_error(f\"\"\"This box's reverse DNS is currently {existing_rdns_v4} (IPv4) and {existing_rdns_v6} (IPv6), but it should be {domain}. Your ISP or cloud provider will have instructions\n\t\t\ton setting up reverse DNS for this box.\"\"\" )\n\n\t# Check the TLSA record.\n\ttlsa_qname = \"_25._tcp.\" + domain\n\ttlsa25 = query_dns(tlsa_qname, \"TLSA\", nxdomain=None)\n\ttlsa25_expected = build_tlsa_record(env)\n\tif tlsa25 == tlsa25_expected:\n\t\toutput.print_ok(\"\"\"The DANE TLSA record for incoming mail is correct (%s).\"\"\" % tlsa_qname,)\n\telif tlsa25 is None:\n\t\tif has_dnssec:\n\t\t\t# Omit a warning about it not being set if DNSSEC isn't enabled,\n\t\t\t# since TLSA shouldn't be used without DNSSEC.\n\t\t\toutput.print_warning(\"\"\"The DANE TLSA record for incoming mail is not set. This is optional.\"\"\")\n\telse:\n\t\toutput.print_error(f\"\"\"The DANE TLSA record for incoming mail ({tlsa_qname}) is not correct. It is '{tlsa25}' but it should be '{tlsa25_expected}'.\n\t\t\tIt may take several hours for public DNS to update after a change.\"\"\")\n\n\t# Check that the hostmaster@ email address exists.\n\tcheck_alias_exists(\"Hostmaster contact address\", \"hostmaster@\" + domain, env, output)\n\ndef check_alias_exists(alias_name, alias, env, output):\n\tmail_aliases = {address: receivers for address, receivers, *_ in get_mail_aliases(env)}\n\tif alias in mail_aliases:\n\t\tif mail_aliases[alias]:\n\t\t\toutput.print_ok(f\"{alias_name} exists as a mail alias. [{alias} ↦ {mail_aliases[alias]}]\")\n\t\telse:\n\t\t\toutput.print_error(\"\"\"You must set the destination of the mail alias for %s to direct email to you or another administrator.\"\"\" % alias)\n\telse:\n\t\toutput.print_error(\"\"\"You must add a mail alias for %s which directs email to you or another administrator.\"\"\" % alias)\n\ndef check_dns_zone(domain, env, output, dns_zonefiles):\n\t# If a DS record is set at the registrar, check DNSSEC first because it will affect the NS query.\n\t# If it is not set, we suggest it last.\n\tif query_dns(domain, \"DS\", nxdomain=None) is not None:\n\t\tcheck_dnssec(domain, env, output, dns_zonefiles)\n\n\t# We provide a DNS zone for the domain. It should have NS records set up\n\t# at the domain name's registrar pointing to this box. The secondary DNS\n\t# server may be customized.\n\t# (I'm not sure whether this necessarily tests the TLD's configuration,\n\t# as it should, or if one successful NS line at the TLD will result in\n\t# this query being answered by the box, which would mean the test is only\n\t# half working.)\n\n\tcustom_dns_records = list(get_custom_dns_config(env)) # generator => list so we can reuse it\n\tcorrect_ip = \"; \".join(sorted(get_custom_dns_records(custom_dns_records, domain, \"A\"))) or env['PUBLIC_IP']\n\tcustom_secondary_ns = get_secondary_dns(custom_dns_records, mode=\"NS\")\n\tsecondary_ns = custom_secondary_ns or [\"ns2.\" + env['PRIMARY_HOSTNAME']]\n\n\texisting_ns = query_dns(domain, \"NS\")\n\tcorrect_ns = \"; \".join(sorted([\"ns1.\" + env[\"PRIMARY_HOSTNAME\"], *secondary_ns]))\n\tip = query_dns(domain, \"A\")\n\n\tprobably_external_dns = False\n\n\tif existing_ns.lower() == correct_ns.lower():\n\t\toutput.print_ok(\"Nameservers are set correctly at registrar. [%s]\" % correct_ns)\n\telif ip == correct_ip:\n\t\t# The domain resolves correctly, so maybe the user is using External DNS.\n\t\toutput.print_warning(f\"\"\"The nameservers set on this domain at your domain name registrar should be {correct_ns}. They are currently {existing_ns}.\n\t\t\tIf you are using External DNS, this may be OK.\"\"\" )\n\t\tprobably_external_dns = True\n\telse:\n\t\toutput.print_error(f\"\"\"The nameservers set on this domain are incorrect. They are currently {existing_ns}. Use your domain name registrar's\n\t\t\tcontrol panel to set the nameservers to {correct_ns}.\"\"\" )\n\n\t# Check that each custom secondary nameserver resolves the IP address.\n\n\tif custom_secondary_ns and not probably_external_dns:\n\t\tfor ns in custom_secondary_ns:\n\t\t\t# We must first resolve the nameserver to an IP address so we can query it.\n\t\t\tns_ips = query_dns(ns, \"A\")\n\t\t\tif not ns_ips or ns_ips in {'[Not Set]', '[timeout]'}:\n\t\t\t\toutput.print_error(\"Secondary nameserver %s is not valid (it doesn't resolve to an IP address).\" % ns)\n\t\t\t\tcontinue\n\t\t\t# Choose the first IP if nameserver returns multiple\n\t\t\tns_ip = ns_ips.split('; ')[0]\n\n\t\t\t# Now query it to see what it says about this domain.\n\t\t\tip = query_dns(domain, \"A\", at=ns_ip, nxdomain=None)\n\t\t\tif ip == correct_ip:\n\t\t\t\toutput.print_ok(\"Secondary nameserver %s resolved the domain correctly.\" % ns)\n\t\t\telif ip is None:\n\t\t\t\toutput.print_error(\"Secondary nameserver %s is not configured to resolve this domain.\" % ns)\n\t\t\telse:\n\t\t\t\toutput.print_error(f\"Secondary nameserver {ns} is not configured correctly. (It resolved this domain as {ip}. It should be {correct_ip}.)\")\n\ndef check_dns_zone_suggestions(domain, env, output, dns_zonefiles, domains_with_a_records):\n\t# Warn if a custom DNS record is preventing this or the automatic www redirect from\n\t# being served.\n\tif domain in domains_with_a_records:\n\t\toutput.print_warning(\"\"\"Web has been disabled for this domain because you have set a custom DNS record.\"\"\")\n\tif \"www.\" + domain in domains_with_a_records:\n\t\toutput.print_warning(\"\"\"A redirect from 'www.%s' has been disabled for this domain because you have set a custom DNS record on the www subdomain.\"\"\" % domain)\n\n\t# Since DNSSEC is optional, if a DS record is NOT set at the registrar suggest it.\n\t# (If it was set, we did the check earlier.)\n\tif query_dns(domain, \"DS\", nxdomain=None) is None:\n\t\tcheck_dnssec(domain, env, output, dns_zonefiles)\n\n\ndef check_dnssec(domain, env, output, dns_zonefiles, is_checking_primary=False):\n\t# See if the domain has a DS record set at the registrar. The DS record must\n\t# match one of the keys that we've used to sign the zone. It may use one of\n\t# several hashing algorithms. We've pre-generated all possible valid DS\n\t# records, although some will be preferred.\n\n\talg_name_map = { '7': 'RSASHA1-NSEC3-SHA1', '8': 'RSASHA256', '13': 'ECDSAP256SHA256' }\n\tdigalg_name_map = { '1': 'SHA-1', '2': 'SHA-256', '4': 'SHA-384' }\n\n\t# Read in the pre-generated DS records\n\texpected_ds_records = { }\n\tds_file = '/etc/nsd/zones/' + dns_zonefiles[domain] + '.ds'\n\tif not os.path.exists(ds_file): return # Domain is in our database but DNS has not yet been updated.\n\twith open(ds_file, encoding=\"utf-8\") as f:\n\t\tfor rr_ds in f:\n\t\t\trr_ds = rr_ds.rstrip()\n\t\t\tds_keytag, ds_alg, ds_digalg, ds_digest = rr_ds.split(\"\\t\")[4].split(\" \")\n\n\t\t\t# Some registrars may want the public key so they can compute the digest. The DS\n\t\t\t# record that we suggest using is for the KSK (and that's how the DS records were generated).\n\t\t\t# We'll also give the nice name for the key algorithm.\n\t\t\tdnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % alg_name_map[ds_alg]))\n\t\t\twith open(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys['KSK'] + '.key'), encoding=\"utf-8\") as f:\n\t\t\t\tdnsssec_pubkey = f.read().split(\"\\t\")[3].split(\" \")[3]\n\n\t\t\texpected_ds_records[ (ds_keytag, ds_alg, ds_digalg, ds_digest) ] = {\n\t\t\t\t\"record\": rr_ds,\n\t\t\t\t\"keytag\": ds_keytag,\n\t\t\t\t\"alg\": ds_alg,\n\t\t\t\t\"alg_name\": alg_name_map[ds_alg],\n\t\t\t\t\"digalg\": ds_digalg,\n\t\t\t\t\"digalg_name\": digalg_name_map[ds_digalg],\n\t\t\t\t\"digest\": ds_digest,\n\t\t\t\t\"pubkey\": dnsssec_pubkey,\n\t\t\t}\n\n\t# Query public DNS for the DS record at the registrar.\n\tds = query_dns(domain, \"DS\", nxdomain=None, as_list=True)\n\tif ds is None or isinstance(ds, str): ds = []\n\n\t# There may be more that one record, so we get the result as a list.\n\t# Filter out records that don't look valid, just in case, and split\n\t# each record on spaces.\n\tds = [tuple(str(rr).split(\" \")) for rr in ds if len(str(rr).split(\" \")) == 4]\n\n\tif len(ds) == 0:\n\t\toutput.print_warning(\"\"\"This domain's DNSSEC DS record is not set. The DS record is optional. The DS record activates DNSSEC. See below for instructions.\"\"\")\n\telse:\n\t\tmatched_ds = set(ds) & set(expected_ds_records)\n\t\tif matched_ds:\n\t\t\t# At least one DS record matches one that corresponds with one of the ways we signed\n\t\t\t# the zone, so it is valid.\n\t\t\t#\n\t\t\t# But it may not be preferred. Only algorithm 13 is preferred. Warn if any of the\n\t\t\t# matched zones uses a different algorithm.\n\t\t\tif {r[1] for r in matched_ds} == { '13' } and {r[2] for r in matched_ds} <= { '2', '4' }: # all are alg 13 and digest type 2 or 4\n\t\t\t\toutput.print_ok(\"DNSSEC 'DS' record is set correctly at registrar.\")\n\t\t\t\treturn\n\t\t\telif len([r for r in matched_ds if r[1] == '13' and r[2] in { '2', '4' }]) > 0: # some but not all are alg 13\n\t\t\t\toutput.print_ok(\"DNSSEC 'DS' record is set correctly at registrar. (Records using algorithm other than ECDSAP256SHA256 and digest types other than SHA-256/384 should be removed.)\")\n\t\t\t\treturn\n\t\t\telse: # no record uses alg 13\n\t\t\t\toutput.print_warning(\"\"\"DNSSEC 'DS' record set at registrar is valid but should be updated to ECDSAP256SHA256 and SHA-256 (see below).\n\t\t\t\tIMPORTANT: Do not delete existing DNSSEC 'DS' records for this domain until confirmation that the new DNSSEC 'DS' record\n\t\t\t\tfor this domain is valid.\"\"\")\n\t\telse:\n\t\t\tif is_checking_primary:\n\t\t\t\toutput.print_error(\"\"\"The DNSSEC 'DS' record for %s is incorrect. See further details below.\"\"\" % domain)\n\t\t\t\treturn\n\t\t\toutput.print_error(\"\"\"This domain's DNSSEC DS record is incorrect. The chain of trust is broken between the public DNS system\n\t\t\t\tand this machine's DNS server. It may take several hours for public DNS to update after a change. If you did not recently\n\t\t\t\tmake a change, you must resolve this immediately (see below).\"\"\")\n\n\toutput.print_line(\"\"\"Follow the instructions provided by your domain name registrar to set a DS record.\n\t\tRegistrars support different sorts of DS records. Use the first option that works:\"\"\")\n\tpreferred_ds_order = [(7, 2), (8, 4), (13, 4), (8, 2), (13, 2)] # low to high, see https://github.com/mail-in-a-box/mailinabox/issues/1998\n\n\tdef preferred_ds_order_func(ds_suggestion):\n\t\tk = (int(ds_suggestion['alg']), int(ds_suggestion['digalg']))\n\t\tif k in preferred_ds_order:\n\t\t\treturn preferred_ds_order.index(k)\n\t\treturn -1 # index before first item\n\toutput.print_line(\"\")\n\tfor i, ds_suggestion in enumerate(sorted(expected_ds_records.values(), key=preferred_ds_order_func, reverse=True)):\n\t\tif preferred_ds_order_func(ds_suggestion) == -1: continue # don't offer record types that the RFC says we must not offer\n\t\toutput.print_line(\"\")\n\t\toutput.print_line(\"Option \" + str(i+1) + \":\")\n\t\toutput.print_line(\"----------\")\n\t\toutput.print_line(\"Key Tag: \" + ds_suggestion['keytag'])\n\t\toutput.print_line(\"Key Flags: KSK / 257\")\n\t\toutput.print_line(\"Algorithm: {} / {}\".format(ds_suggestion['alg'], ds_suggestion['alg_name']))\n\t\toutput.print_line(\"Digest Type: {} / {}\".format(ds_suggestion['digalg'], ds_suggestion['digalg_name']))\n\t\toutput.print_line(\"Digest: \" + ds_suggestion['digest'])\n\t\toutput.print_line(\"Public Key: \")\n\t\toutput.print_line(ds_suggestion['pubkey'], monospace=True)\n\t\toutput.print_line(\"\")\n\t\toutput.print_line(\"Bulk/Record Format:\")\n\t\toutput.print_line(ds_suggestion['record'], monospace=True)\n\tif len(ds) > 0:\n\t\toutput.print_line(\"\")\n\t\toutput.print_line(\"The DS record is currently set to:\")\n\t\tfor rr in sorted(ds):\n\t\t\toutput.print_line(\"Key Tag: {}, Algorithm: {}, Digest Type: {}, Digest: {}\".format(*rr))\n\ndef check_mail_domain(domain, env, output):\n\t# Check the MX record.\n\n\trecommended_mx = \"10 \" + env['PRIMARY_HOSTNAME']\n\tmx = query_dns(domain, \"MX\", nxdomain=None)\n\n\tif mx is None or mx == \"[timeout]\":\n\t\tmxhost = None\n\telse:\n\t\t# query_dns returns a semicolon-delimited list\n\t\t# of priority-host pairs.\n\t\tmxhost = mx.split('; ')[0].split(' ')[1]\n\n\tif mxhost is None:\n\t\t# A missing MX record is okay on the primary hostname because\n\t\t# the primary hostname's A record (the MX fallback) is... itself,\n\t\t# which is what we want the MX to be.\n\t\tif domain == env['PRIMARY_HOSTNAME']:\n\t\t\toutput.print_ok(f\"Domain's email is directed to this domain. [{domain} has no MX record, which is ok]\")\n\n\t\t# And a missing MX record is okay on other domains if the A record\n\t\t# matches the A record of the PRIMARY_HOSTNAME. Actually this will\n\t\t# probably confuse DANE TLSA, but we'll let that slide for now.\n\t\telse:\n\t\t\tdomain_a = query_dns(domain, \"A\", nxdomain=None)\n\t\t\tprimary_a = query_dns(env['PRIMARY_HOSTNAME'], \"A\", nxdomain=None)\n\t\t\tif domain_a is not None and domain_a == primary_a:\n\t\t\t\toutput.print_ok(f\"Domain's email is directed to this domain. [{domain} has no MX record but its A record is OK]\")\n\t\t\telse:\n\t\t\t\toutput.print_error(f\"\"\"This domain's DNS MX record is not set. It should be '{recommended_mx}'. Mail will not\n\t\t\t\t\tbe delivered to this box. It may take several hours for public DNS to update after a\n\t\t\t\t\tchange. This problem may result from other issues listed here.\"\"\")\n\n\telif mxhost == env['PRIMARY_HOSTNAME']:\n\t\tgood_news = f\"Domain's email is directed to this domain. [{domain} ↦ {mx}]\"\n\t\tif mx != recommended_mx:\n\t\t\tgood_news += f\" This configuration is non-standard. The recommended configuration is '{recommended_mx}'.\"\n\t\toutput.print_ok(good_news)\n\n\t\t# Check MTA-STS policy.\n\t\tloop = asyncio.new_event_loop()\n\t\tsts_resolver = postfix_mta_sts_resolver.resolver.STSResolver(loop=loop)\n\t\tvalid, policy = loop.run_until_complete(sts_resolver.resolve(domain))\n\t\tif valid == postfix_mta_sts_resolver.resolver.STSFetchResult.VALID:\n\t\t\tif policy[1].get(\"mx\") == [env['PRIMARY_HOSTNAME']] and policy[1].get(\"mode\") == \"enforce\": # policy[0] is the policyid\n\t\t\t\toutput.print_ok(\"MTA-STS policy is present.\")\n\t\t\telse:\n\t\t\t\toutput.print_error(f\"MTA-STS policy is present but has unexpected settings. [{policy[1]}]\")\n\t\telse:\n\t\t\toutput.print_error(f\"MTA-STS policy is missing: {valid}\")\n\n\telse:\n\t\toutput.print_error(f\"\"\"This domain's DNS MX record is incorrect. It is currently set to '{mx}' but should be '{recommended_mx}'. Mail will not\n\t\t\tbe delivered to this box. It may take several hours for public DNS to update after a change. This problem may result from\n\t\t\tother issues listed here.\"\"\")\n\n\t# Check that the postmaster@ email address exists. Not required if the domain has a\n\t# catch-all address or domain alias.\n\tif \"@\" + domain not in [address for address, *_ in get_mail_aliases(env)]:\n\t\tcheck_alias_exists(\"Postmaster contact address\", \"postmaster@\" + domain, env, output)\n\n\t# Stop if the domain is listed in the Spamhaus Domain Block List.\n\t# The user might have chosen a domain that was previously in use by a spammer\n\t# and will not be able to reliably send mail.\n\t\n\t# See https://www.spamhaus.org/news/article/807/using-our-public-mirrors-check-your-return-codes-now. for\n\t# information on spamhaus return codes\n\tdbl = query_dns(domain+'.dbl.spamhaus.org', \"A\", nxdomain=None)\n\tif dbl is None:\n\t\toutput.print_ok(\"Domain is not blacklisted by dbl.spamhaus.org.\")\n\telif dbl == \"[timeout]\":\n\t\toutput.print_warning(f\"Connection to dbl.spamhaus.org timed out. Could not determine whether the domain {domain} is blacklisted. Please try again later.\")\n\telif dbl == \"[Not Set]\":\n\t\toutput.print_warning(f\"Could not connect to dbl.spamhaus.org. Could not determine whether the domain {domain} is blacklisted. Please try again later.\")\n\telif dbl == \"127.255.255.252\":\n\t\toutput.print_warning(\"Incorrect spamhaus query: %s. Could not determine whether the domain %s is blacklisted.\" % (domain+'.dbl.spamhaus.org', domain))\n\telif dbl == \"127.255.255.254\":\n\t\toutput.print_warning(\"Mail-in-a-Box is configured to use a public DNS server. This is not supported by spamhaus. Could not determine whether the domain {} is blacklisted.\".format(domain))\n\telif dbl == \"127.255.255.255\":\n\t\toutput.print_warning(\"Too many queries have been performed on the spamhaus server. Could not determine whether the domain {} is blacklisted.\".format(domain))\n\telse:\n\t\toutput.print_error(f\"\"\"This domain is listed in the Spamhaus Domain Block List (code {dbl}),\n\t\t\twhich may prevent recipients from receiving your mail.\n\t\t\tSee http://www.spamhaus.org/dbl/ and http://www.spamhaus.org/query/domain/{domain}.\"\"\")\n\ndef check_web_domain(domain, rounded_time, ssl_certificates, env, output):\n\t# See if the domain's A record resolves to our PUBLIC_IP. This is already checked\n\t# for PRIMARY_HOSTNAME, for which it is required for mail specifically. For it and\n\t# other domains, it is required to access its website.\n\tif domain != env['PRIMARY_HOSTNAME']:\n\t\tok_values = []\n\t\tfor (rtype, expected) in ((\"A\", env['PUBLIC_IP']), (\"AAAA\", env.get('PUBLIC_IPV6'))):\n\t\t\tif not expected: continue # IPv6 is not configured\n\t\t\tvalue = query_dns(domain, rtype)\n\t\t\tif value == normalize_ip(expected):\n\t\t\t\tok_values.append(value)\n\t\t\telse:\n\t\t\t\toutput.print_error(f\"\"\"This domain should resolve to this box's IP address ({rtype} {expected}) if you would like the box to serve\n\t\t\t\t\twebmail or a website on this domain. The domain currently resolves to {value} in public DNS. It may take several hours for\n\t\t\t\t\tpublic DNS to update after a change. This problem may result from other issues listed here.\"\"\")\n\t\t\t\treturn\n\n\t\t# If both A and AAAA are correct...\n\t\toutput.print_ok(\"Domain resolves to this box's IP address. [{} ↦ {}]\".format(domain, '; '.join(ok_values)))\n\n\n\t# We need a TLS certificate for PRIMARY_HOSTNAME because that's where the\n\t# user will log in with IMAP or webmail. Any other domain we serve a\n\t# website for also needs a signed certificate.\n\tcheck_ssl_cert(domain, rounded_time, ssl_certificates, env, output)\n\ndef query_dns(qname, rtype, nxdomain='[Not Set]', at=None, as_list=False):\n\t# Make the qname absolute by appending a period. Without this, dns.resolver.query\n\t# will fall back a failed lookup to a second query with this machine's hostname\n\t# appended. This has been causing some false-positive Spamhaus reports. The\n\t# reverse DNS lookup will pass a dns.name.Name instance which is already\n\t# absolute so we should not modify that.\n\tif isinstance(qname, str):\n\t\tqname += \".\"\n\n\t# Use the default nameservers (as defined by the system, which is our locally\n\t# running bind server), or if the 'at' argument is specified, use that host\n\t# as the nameserver.\n\tresolver = dns.resolver.get_default_resolver()\n\n\t# Make sure at is not a string that cannot be used as a nameserver\n\tif at and at not in {'[Not set]', '[timeout]'}:\n\t\tresolver = dns.resolver.Resolver()\n\t\tresolver.nameservers = [at]\n\n\t# Set a timeout so that a non-responsive server doesn't hold us back.\n\tresolver.timeout = 5\n\t# The number of seconds to spend trying to get an answer to the question. If the\n\t# lifetime expires a dns.exception.Timeout exception will be raised.\n\tresolver.lifetime = 5\n\n\t# Do the query.\n\ttry:\n\t\tresponse = resolver.resolve(qname, rtype)\n\texcept (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):\n\t\t# Host did not have an answer for this query; not sure what the\n\t\t# difference is between the two exceptions.\n\t\treturn nxdomain\n\texcept dns.exception.Timeout:\n\t\treturn \"[timeout]\"\n\n\t# Normalize IP addresses. IP address --- especially IPv6 addresses --- can\n\t# be expressed in equivalent string forms. Canonicalize the form before\n\t# returning them. The caller should normalize any IP addresses the result\n\t# of this method is compared with.\n\tif rtype in {\"A\", \"AAAA\"}:\n\t\tresponse = [normalize_ip(str(r)) for r in response]\n\n\tif as_list:\n\t\treturn response\n\n\t# There may be multiple answers; concatenate the response. Remove trailing\n\t# periods from responses since that's how qnames are encoded in DNS but is\n\t# confusing for us. The order of the answers doesn't matter, so sort so we\n\t# can compare to a well known order.\n\treturn \"; \".join(sorted(str(r).rstrip('.') for r in response))\n\ndef check_ssl_cert(domain, rounded_time, ssl_certificates, env, output):\n\t# Check that TLS certificate is signed.\n\n\t# Skip the check if the A record is not pointed here.\n\tif query_dns(domain, \"A\", None) not in {env['PUBLIC_IP'], None}: return\n\n\t# Where is the certificate file stored?\n\ttls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True)\n\tif tls_cert is None:\n\t\toutput.print_warning(\"\"\"No TLS (SSL) certificate is installed for this domain. Visitors to a website on\n\t\t\tthis domain will get a security warning. If you are not serving a website on this domain, you do\n\t\t\tnot need to take any action. Use the TLS Certificates page in the control panel to install a\n\t\t\tTLS certificate.\"\"\")\n\t\treturn\n\n\t# Check that the certificate is good.\n\n\tcert_status, cert_status_details = check_certificate(domain, tls_cert[\"certificate\"], tls_cert[\"private-key\"], rounded_time=rounded_time)\n\n\tif cert_status == \"OK\":\n\t\t# The certificate is ok. The details has expiry info.\n\t\toutput.print_ok(\"TLS (SSL) certificate is signed & valid. \" + cert_status_details)\n\n\telif cert_status == \"SELF-SIGNED\":\n\t\t# Offer instructions for purchasing a signed certificate.\n\t\tif domain == env['PRIMARY_HOSTNAME']:\n\t\t\toutput.print_error(\"\"\"The TLS (SSL) certificate for this domain is currently self-signed. You will get a security\n\t\t\twarning when you check or send email and when visiting this domain in a web browser (for webmail or\n\t\t\tstatic site hosting).\"\"\")\n\t\telse:\n\t\t\toutput.print_error(\"\"\"The TLS (SSL) certificate for this domain is self-signed.\"\"\")\n\n\telse:\n\t\toutput.print_error(\"The TLS (SSL) certificate has a problem: \" + cert_status)\n\t\tif cert_status_details:\n\t\t\toutput.print_line(\"\")\n\t\t\toutput.print_line(cert_status_details)\n\t\t\toutput.print_line(\"\")\n\n_apt_updates = None\ndef list_apt_updates(apt_update=True):\n\t# See if we have this information cached recently.\n\t# Keep the information for 8 hours.\n\tglobal _apt_updates\n\tif _apt_updates is not None and _apt_updates[0] > datetime.datetime.now() - datetime.timedelta(hours=8):\n\t\treturn _apt_updates[1]\n\n\t# Run apt-get update to refresh package list. This should be running daily\n\t# anyway, so on the status checks page don't do this because it is slow.\n\tif apt_update:\n\t\tshell(\"check_call\", [\"/usr/bin/apt-get\", \"-qq\", \"update\"])\n\n\t# Run apt-get upgrade in simulate mode to get a list of what\n\t# it would do.\n\tsimulated_install = shell(\"check_output\", [\"/usr/bin/apt-get\", \"-qq\", \"-s\", \"upgrade\"])\n\tpkgs = []\n\tfor line in simulated_install.split('\\n'):\n\t\tif line.strip() == \"\":\n\t\t\tcontinue\n\t\tif re.match(r'^Conf .*', line):\n\t\t\t # remove these lines, not informative\n\t\t\tcontinue\n\t\tm = re.match(r'^Inst (.*) \\[(.*)\\] \\((\\S*)', line)\n\t\tif m:\n\t\t\tpkgs.append({ \"package\": m.group(1), \"version\": m.group(3), \"current_version\": m.group(2) })\n\t\telse:\n\t\t\tpkgs.append({ \"package\": \"[\" + line + \"]\", \"version\": \"\", \"current_version\": \"\" })\n\n\t# Cache for future requests.\n\t_apt_updates = (datetime.datetime.now(), pkgs)\n\n\treturn pkgs\n\ndef what_version_is_this(env):\n\t# This function runs `git describe --always --abbrev=0` on the Mail-in-a-Box installation directory.\n\t# Git may not be installed and Mail-in-a-Box may not have been cloned from github,\n\t# so this function may raise all sorts of exceptions.\n\tmiab_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\treturn shell(\"check_output\", [\"/usr/bin/git\", \"describe\", \"--always\", \"--abbrev=0\"], env={\"GIT_DIR\": os.path.join(miab_dir, '.git')}).strip()\n\ndef get_latest_miab_version():\n\t# This pings https://mailinabox.email/setup.sh and extracts the tag named in\n\t# the script to determine the current product version.\n from urllib.request import urlopen, HTTPError, URLError\n\n try:\n return re.search(b'TAG=(.*)', urlopen(\"https://mailinabox.email/setup.sh?ping=1\", timeout=5).read()).group(1).decode(\"utf8\")\n except (TimeoutError, HTTPError, URLError):\n return None\n\ndef check_miab_version(env, output):\n\tconfig = load_settings(env)\n\n\ttry:\n\t\tthis_ver = what_version_is_this(env)\n\texcept:\n\t\tthis_ver = \"Unknown\"\n\n\tif config.get(\"privacy\", True):\n\t\toutput.print_warning(\"You are running version Mail-in-a-Box %s. Mail-in-a-Box version check disabled by privacy setting.\" % this_ver)\n\telse:\n\t\tlatest_ver = get_latest_miab_version()\n\n\t\tif this_ver == latest_ver:\n\t\t\toutput.print_ok(\"Mail-in-a-Box is up to date. You are running version %s.\" % this_ver)\n\t\telif latest_ver is None:\n\t\t\toutput.print_error(\"Latest Mail-in-a-Box version could not be determined. You are running version %s.\" % this_ver)\n\t\telse:\n\t\t\toutput.print_error(f\"A new version of Mail-in-a-Box is available. You are running version {this_ver}. The latest version is {latest_ver}. For upgrade instructions, see https://mailinabox.email. \")\n\ndef run_and_output_changes(env, pool):\n\timport json\n\tfrom difflib import SequenceMatcher\n\n\tout = ConsoleOutput()\n\n\t# Run status checks.\n\tcur = BufferedOutput()\n\trun_checks(True, env, cur, pool)\n\n\t# Load previously saved status checks.\n\tcache_fn = \"/var/cache/mailinabox/status_checks.json\"\n\tif os.path.exists(cache_fn):\n\t\twith open(cache_fn, encoding=\"utf-8\") as f:\n\t\t\ttry:\n\t\t\t\tprev = json.load(f)\n\t\t\texcept json.JSONDecodeError:\n\t\t\t\tprev = []\n\n\t\t# Group the serial output into categories by the headings.\n\t\tdef group_by_heading(lines):\n\t\t\tfrom collections import OrderedDict\n\t\t\tret = OrderedDict()\n\t\t\tk = []\n\t\t\tret[\"No Category\"] = k\n\t\t\tfor line_type, line_args, line_kwargs in lines:\n\t\t\t\tif line_type == \"add_heading\":\n\t\t\t\t\tk = []\n\t\t\t\t\tret[line_args[0]] = k\n\t\t\t\telse:\n\t\t\t\t\tk.append((line_type, line_args, line_kwargs))\n\t\t\treturn ret\n\t\tprev_status = group_by_heading(prev)\n\t\tcur_status = group_by_heading(cur.buf)\n\n\t\t# Compare the previous to the current status checks\n\t\t# category by category.\n\t\tfor category, cur_lines in cur_status.items():\n\t\t\tif category not in prev_status:\n\t\t\t\tout.add_heading(category + \" -- Added\")\n\t\t\t\tBufferedOutput(with_lines=cur_lines).playback(out)\n\t\t\telse:\n\t\t\t\t# Actual comparison starts here...\n\t\t\t\tprev_lines = prev_status[category]\n\t\t\t\tdef stringify(lines):\n\t\t\t\t\treturn [json.dumps(line) for line in lines]\n\t\t\t\tdiff = SequenceMatcher(None, stringify(prev_lines), stringify(cur_lines)).get_opcodes()\n\t\t\t\tfor op, i1, i2, j1, j2 in diff:\n\t\t\t\t\tif op == \"replace\":\n\t\t\t\t\t\tout.add_heading(category + \" -- Previously:\")\n\t\t\t\t\telif op == \"delete\":\n\t\t\t\t\t\tout.add_heading(category + \" -- Removed\")\n\t\t\t\t\tif op in {\"replace\", \"delete\"}:\n\t\t\t\t\t\tBufferedOutput(with_lines=prev_lines[i1:i2]).playback(out)\n\n\t\t\t\t\tif op == \"replace\":\n\t\t\t\t\t\tout.add_heading(category + \" -- Currently:\")\n\t\t\t\t\telif op == \"insert\":\n\t\t\t\t\t\tout.add_heading(category + \" -- Added\")\n\t\t\t\t\tif op in {\"replace\", \"insert\"}:\n\t\t\t\t\t\tBufferedOutput(with_lines=cur_lines[j1:j2]).playback(out)\n\n\t\tfor category, prev_lines in prev_status.items():\n\t\t\tif category not in cur_status:\n\t\t\t\tout.add_heading(category)\n\t\t\t\tout.print_warning(\"This section was removed.\")\n\n\t# Store the current status checks output for next time.\n\tos.makedirs(os.path.dirname(cache_fn), exist_ok=True)\n\twith open(cache_fn, \"w\", encoding=\"utf-8\") as f:\n\t\tjson.dump(cur.buf, f, indent=True)\n\ndef normalize_ip(ip):\n\t# Use ipaddress module to normalize the IPv6 notation and\n\t# ensure we are matching IPv6 addresses written in different\n\t# representations according to rfc5952.\n\timport ipaddress\n\ttry:\n\t\treturn str(ipaddress.ip_address(ip))\n\texcept:\n\t\treturn ip\n\nclass FileOutput:\n\tdef __init__(self, buf, width):\n\t\tself.buf = buf\n\t\tself.width = width\n\n\tdef add_heading(self, heading):\n\t\tprint(file=self.buf)\n\t\tprint(heading, file=self.buf)\n\t\tprint(\"=\" * len(heading), file=self.buf)\n\n\tdef print_ok(self, message):\n\t\tself.print_block(message, first_line=\"✓ \")\n\n\tdef print_error(self, message):\n\t\tself.print_block(message, first_line=\"✖ \")\n\n\tdef print_warning(self, message):\n\t\tself.print_block(message, first_line=\"? \")\n\n\tdef print_block(self, message, first_line=\" \"):\n\t\tprint(first_line, end='', file=self.buf)\n\t\tmessage = re.sub(\"\\n\\\\s*\", \" \", message)\n\t\twords = re.split(r\"(\\s+)\", message)\n\t\tlinelen = 0\n\t\tfor w in words:\n\t\t\tif self.width and (linelen + len(w) > self.width-1-len(first_line)):\n\t\t\t\tprint(file=self.buf)\n\t\t\t\tprint(\" \", end=\"\", file=self.buf)\n\t\t\t\tlinelen = 0\n\t\t\tif linelen == 0 and w.strip() == \"\": continue\n\t\t\tprint(w, end=\"\", file=self.buf)\n\t\t\tlinelen += len(w)\n\t\tprint(file=self.buf)\n\n\tdef print_line(self, message, monospace=False):\n\t\tfor line in message.split(\"\\n\"):\n\t\t\tself.print_block(line)\n\nclass ConsoleOutput(FileOutput):\n\tdef __init__(self):\n\t\tself.buf = sys.stdout\n\n\t\t# Do nice line-wrapping according to the size of the terminal.\n\t\t# The 'stty' program queries standard input for terminal information.\n\t\tif sys.stdin.isatty():\n\t\t\ttry:\n\t\t\t\tself.width = int(shell('check_output', ['stty', 'size']).split()[1])\n\t\t\texcept:\n\t\t\t\tself.width = 76\n\n\t\telse:\n\t\t\t# However if standard input is not a terminal, we would get\n\t\t\t# \"stty: standard input: Inappropriate ioctl for device\". So\n\t\t\t# we test with sys.stdin.isatty first, and if it is not a\n\t\t\t# terminal don't do any line wrapping. When this script is\n\t\t\t# run from cron, or if stdin has been redirected, this happens.\n\t\t\tself.width = None\n\nclass BufferedOutput:\n\t# Record all of the instance method calls so we can play them back later.\n\tdef __init__(self, with_lines=None):\n\t\tself.buf = with_lines if with_lines else []\n\tdef __getattr__(self, attr):\n\t\tif attr not in {\"add_heading\", \"print_ok\", \"print_error\", \"print_warning\", \"print_block\", \"print_line\"}:\n\t\t\traise AttributeError\n\t\t# Return a function that just records the call & arguments to our buffer.\n\t\tdef w(*args, **kwargs):\n\t\t\tself.buf.append((attr, args, kwargs))\n\t\treturn w\n\tdef playback(self, output):\n\t\tfor attr, args, kwargs in self.buf:\n\t\t\tgetattr(output, attr)(*args, **kwargs)\n\n\nif __name__ == \"__main__\":\n\tfrom utils import load_environment\n\n\tenv = load_environment()\n\n\tif len(sys.argv) == 1:\n\t\twith multiprocessing.pool.Pool(processes=10) as pool:\n\t\t\trun_checks(False, env, ConsoleOutput(), pool)\n\n\telif sys.argv[1] == \"--show-changes\":\n\t\twith multiprocessing.pool.Pool(processes=10) as pool:\n\t\t\trun_and_output_changes(env, pool)\n\n\telif sys.argv[1] == \"--check-primary-hostname\":\n\t\t# See if the primary hostname appears resolvable and has a signed certificate.\n\t\tdomain = env['PRIMARY_HOSTNAME']\n\t\tif query_dns(domain, \"A\") != env['PUBLIC_IP']:\n\t\t\tsys.exit(1)\n\t\tssl_certificates = get_ssl_certificates(env)\n\t\ttls_cert = get_domain_ssl_files(domain, ssl_certificates, env)\n\t\tif not os.path.exists(tls_cert[\"certificate\"]):\n\t\t\tsys.exit(1)\n\t\tcert_status, cert_status_details = check_certificate(domain, tls_cert[\"certificate\"], tls_cert[\"private-key\"], warn_if_expiring_soon=False)\n\t\tif cert_status != \"OK\":\n\t\t\tsys.exit(1)\n\t\tsys.exit(0)\n\n\telif sys.argv[1] == \"--version\":\n\t\tprint(what_version_is_this(env))\n\n\telif sys.argv[1] == \"--only\":\n\t\twith multiprocessing.pool.Pool(processes=10) as pool:\n\t\t\trun_checks(False, env, ConsoleOutput(), pool, domains_to_check=sys.argv[2:])\n\n\n\nFile: management/dns_update.py\n\n#!/usr/local/lib/mailinabox/env/bin/python\n\n# Creates DNS zone files for all of the domains of all of the mail users\n# and mail aliases and restarts nsd.\n########################################################################\n\nimport sys, os, os.path, datetime, re, hashlib, base64\nimport ipaddress\nimport rtyaml\nimport dns.resolver\n\nfrom utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains, get_ssh_port\nfrom ssl_certificates import get_ssl_certificates, check_certificate\nimport contextlib\n\n# From https://stackoverflow.com/questions/3026957/how-to-validate-a-domain-name-using-regex-php/16491074#16491074\n# This regular expression matches domain names according to RFCs, it also accepts fqdn with an leading dot,\n# underscores, as well as asterisks which are allowed in domain names but not hostnames (i.e. allowed in\n# DNS but not in URLs), which are common in certain record types like for DKIM.\nDOMAIN_RE = r\"^(?!\\-)(?:[*][.])?(?:[a-zA-Z\\d\\-_]{0,62}[a-zA-Z\\d_]\\.){1,126}(?!\\d+)[a-zA-Z\\d_]{1,63}(\\.?)$\"\n\ndef get_dns_domains(env):\n\t# Add all domain names in use by email users and mail aliases, any\n\t# domains we serve web for (except www redirects because that would\n\t# lead to infinite recursion here) and ensure PRIMARY_HOSTNAME is in the list.\n\tfrom mailconfig import get_mail_domains\n\tfrom web_update import get_web_domains\n\tdomains = set()\n\tdomains |= set(get_mail_domains(env))\n\tdomains |= set(get_web_domains(env, include_www_redirects=False))\n\tdomains.add(env['PRIMARY_HOSTNAME'])\n\treturn domains\n\ndef get_dns_zones(env):\n\t# What domains should we create DNS zones for? Never create a zone for\n\t# a domain & a subdomain of that domain.\n\tdomains = get_dns_domains(env)\n\n\t# Exclude domains that are subdomains of other domains we know. Proceed\n\t# by looking at shorter domains first.\n\tzone_domains = set()\n\tfor domain in sorted(domains, key=len):\n\t\tfor d in zone_domains:\n\t\t\tif domain.endswith(\".\" + d):\n\t\t\t\t# We found a parent domain already in the list.\n\t\t\t\tbreak\n\t\telse:\n\t\t\t# 'break' did not occur: there is no parent domain.\n\t\t\tzone_domains.add(domain)\n\n\t# Make a nice and safe filename for each domain.\n\tzonefiles = [[domain, safe_domain_name(domain) + \".txt\"] for domain in zone_domains]\n\n\t# Sort the list so that the order is nice and so that nsd.conf has a\n\t# stable order so we don't rewrite the file & restart the service\n\t# meaninglessly.\n\tzone_order = sort_domains([ zone[0] for zone in zonefiles ], env)\n\tzonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )\n\n\treturn zonefiles\n\ndef do_dns_update(env, force=False):\n\t# Write zone files.\n\tos.makedirs('/etc/nsd/zones', exist_ok=True)\n\tzonefiles = []\n\tupdated_domains = []\n\tfor (domain, zonefile, records) in build_zones(env):\n\t\t# The final set of files will be signed.\n\t\tzonefiles.append((domain, zonefile + \".signed\"))\n\n\t\t# See if the zone has changed, and if so update the serial number\n\t\t# and write the zone file.\n\t\tif not write_nsd_zone(domain, \"/etc/nsd/zones/\" + zonefile, records, env, force):\n\t\t\t# Zone was not updated. There were no changes.\n\t\t\tcontinue\n\n\t\t# Mark that we just updated this domain.\n\t\tupdated_domains.append(domain)\n\n\t\t# Sign the zone.\n\t\t#\n\t\t# Every time we sign the zone we get a new result, which means\n\t\t# we can't sign a zone without bumping the zone's serial number.\n\t\t# Thus we only sign a zone if write_nsd_zone returned True\n\t\t# indicating the zone changed, and thus it got a new serial number.\n\t\t# write_nsd_zone is smart enough to check if a zone's signature\n\t\t# is nearing expiration and if so it'll bump the serial number\n\t\t# and return True so we get a chance to re-sign it.\n\t\tsign_zone(domain, zonefile, env)\n\n\t# Write the main nsd.conf file.\n\tif write_nsd_conf(zonefiles, list(get_custom_dns_config(env)), env):\n\t\t# Make sure updated_domains contains *something* if we wrote an updated\n\t\t# nsd.conf so that we know to restart nsd.\n\t\tif len(updated_domains) == 0:\n\t\t\tupdated_domains.append(\"DNS configuration\")\n\n\t# Tell nsd to reload changed zone files.\n\tif len(updated_domains) > 0:\n\t\t# 'reconfig' is needed if there are added or removed zones, but\n\t\t# it may not reload existing zones, so we call 'reload' too. If\n\t\t# nsd isn't running, nsd-control fails, so in that case revert\n\t\t# to restarting nsd to make sure it is running. Restarting nsd\n\t\t# should also refresh everything.\n\t\ttry:\n\t\t\tshell('check_call', [\"/usr/sbin/nsd-control\", \"reconfig\"])\n\t\t\tshell('check_call', [\"/usr/sbin/nsd-control\", \"reload\"])\n\t\texcept:\n\t\t\tshell('check_call', [\"/usr/sbin/service\", \"nsd\", \"restart\"])\n\n\t# Write the OpenDKIM configuration tables for all of the mail domains.\n\tfrom mailconfig import get_mail_domains\n\tif write_opendkim_tables(get_mail_domains(env), env):\n\t\t# Settings changed. Kick opendkim.\n\t\tshell('check_call', [\"/usr/sbin/service\", \"opendkim\", \"restart\"])\n\t\tif len(updated_domains) == 0:\n\t\t\t# If this is the only thing that changed?\n\t\t\tupdated_domains.append(\"OpenDKIM configuration\")\n\n\t# Clear bind9's DNS cache so our own DNS resolver is up to date.\n\t# (ignore errors with trap=True)\n\tshell('check_call', [\"/usr/sbin/rndc\", \"flush\"], trap=True)\n\n\tif len(updated_domains) == 0:\n\t\t# if nothing was updated (except maybe OpenDKIM's files), don't show any output\n\t\treturn \"\"\n\telse:\n\t\treturn \"updated DNS: \" + \",\".join(updated_domains) + \"\\n\"\n\n########################################################################\n\ndef build_zones(env):\n\t# What domains (and their zone filenames) should we build?\n\tdomains = get_dns_domains(env)\n\tzonefiles = get_dns_zones(env)\n\n\t# Create a dictionary of domains to a set of attributes for each\n\t# domain, such as whether there are mail users at the domain.\n\tfrom mailconfig import get_mail_domains\n\tfrom web_update import get_web_domains\n\tmail_domains = set(get_mail_domains(env))\n\tmail_user_domains = set(get_mail_domains(env, users_only=True)) # i.e. will log in for mail, Nextcloud\n\tweb_domains = set(get_web_domains(env))\n\tauto_domains = web_domains - set(get_web_domains(env, include_auto=False))\n\tdomains |= auto_domains # www redirects not included in the initial list, see above\n\n\t# Add ns1/ns2+PRIMARY_HOSTNAME which must also have A/AAAA records\n\t# when the box is acting as authoritative DNS server for its domains.\n\tfor ns in (\"ns1\", \"ns2\"):\n\t\td = ns + \".\" + env[\"PRIMARY_HOSTNAME\"]\n\t\tdomains.add(d)\n\t\tauto_domains.add(d)\n\n\tdomains = {\n\t\tdomain: {\n\t\t\t\"user\": domain in mail_user_domains,\n\t\t\t\"mail\": domain in mail_domains,\n\t\t\t\"web\": domain in web_domains,\n\t\t\t\"auto\": domain in auto_domains,\n\t\t}\n\t\tfor domain in domains\n\t}\n\n\t# For MTA-STS, we'll need to check if the PRIMARY_HOSTNAME certificate is\n\t# singned and valid. Check that now rather than repeatedly for each domain.\n\tdomains[env[\"PRIMARY_HOSTNAME\"]][\"certificate-is-valid\"] = is_domain_cert_signed_and_valid(env[\"PRIMARY_HOSTNAME\"], env)\n\n\t# Load custom records to add to zones.\n\tadditional_records = list(get_custom_dns_config(env))\n\n\t# Build DNS records for each zone.\n\tfor domain, zonefile in zonefiles:\n\t\t# Build the records to put in the zone.\n\t\trecords = build_zone(domain, domains, additional_records, env)\n\t\tyield (domain, zonefile, records)\n\ndef build_zone(domain, domain_properties, additional_records, env, is_zone=True):\n\trecords = []\n\n\t# For top-level zones, define the authoritative name servers.\n\t#\n\t# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,\n\t# so we allow the user to override the second nameserver definition so that\n\t# secondary DNS can be set up elsewhere.\n\t#\n\t# 'False' in the tuple indicates these records would not be used if the zone\n\t# is managed outside of the box.\n\tif is_zone:\n\t\t# Obligatory NS record to ns1.PRIMARY_HOSTNAME.\n\t\trecords.append((None, \"NS\", \"ns1.%s.\" % env[\"PRIMARY_HOSTNAME\"], False))\n\n\t\t# NS record to ns2.PRIMARY_HOSTNAME or whatever the user overrides.\n\t\t# User may provide one or more additional nameservers\n\t\tsecondary_ns_list = get_secondary_dns(additional_records, mode=\"NS\") \\\n\t\t\tor [\"ns2.\" + env[\"PRIMARY_HOSTNAME\"]]\n\t\trecords.extend((None, \"NS\", secondary_ns+'.', False) for secondary_ns in secondary_ns_list)\n\n\n\t# In PRIMARY_HOSTNAME...\n\tif domain == env[\"PRIMARY_HOSTNAME\"]:\n\t\t# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them\n\t\t# and we can provide different explanatory text.\n\t\trecords.append((None, \"A\", env[\"PUBLIC_IP\"], \"Required. Sets the IP address of the box.\"))\n\t\tif env.get(\"PUBLIC_IPV6\"): records.append((None, \"AAAA\", env[\"PUBLIC_IPV6\"], \"Required. Sets the IPv6 address of the box.\"))\n\n\t\t# Add a DANE TLSA record for SMTP.\n\t\trecords.append((\"_25._tcp\", \"TLSA\", build_tlsa_record(env), \"Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used.\"))\n\n\t\t# Add a DANE TLSA record for HTTPS, which some browser extensions might make use of.\n\t\trecords.append((\"_443._tcp\", \"TLSA\", build_tlsa_record(env), \"Optional. When DNSSEC is enabled, provides out-of-band HTTPS certificate validation for a few web clients that support it.\"))\n\n\t\t# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.\n\t\trecords.extend((None, \"SSHFP\", value, \"Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh.\") for value in build_sshfp_records())\n\n\t# Add DNS records for any subdomains of this domain. We should not have a zone for\n\t# both a domain and one of its subdomains.\n\tif is_zone: # don't recurse when we're just loading data for a subdomain\n\t\tsubdomains = [d for d in domain_properties if d.endswith(\".\" + domain)]\n\t\tfor subdomain in subdomains:\n\t\t\tsubdomain_qname = subdomain[0:-len(\".\" + domain)]\n\t\t\tsubzone = build_zone(subdomain, domain_properties, additional_records, env, is_zone=False)\n\t\t\tfor child_qname, child_rtype, child_value, child_explanation in subzone:\n\t\t\t\tif child_qname is None:\n\t\t\t\t\tchild_qname = subdomain_qname\n\t\t\t\telse:\n\t\t\t\t\tchild_qname += \".\" + subdomain_qname\n\t\t\t\trecords.append((child_qname, child_rtype, child_value, child_explanation))\n\n\thas_rec_base = list(records) # clone current state\n\tdef has_rec(qname, rtype, prefix=None):\n\t\treturn any(rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)) for rec in has_rec_base)\n\n\t# The user may set other records that don't conflict with our settings.\n\t# Don't put any TXT records above this line, or it'll prevent any custom TXT records.\n\tfor qname, rtype, value in filter_custom_records(domain, additional_records):\n\t\t# Don't allow custom records for record types that override anything above.\n\t\t# But allow multiple custom records for the same rtype --- see how has_rec_base is used.\n\t\tif has_rec(qname, rtype): continue\n\n\t\t# The \"local\" keyword on A/AAAA records are short-hand for our own IP.\n\t\t# This also flags for web configuration that the user wants a website here.\n\t\tif rtype == \"A\" and value == \"local\":\n\t\t\tvalue = env[\"PUBLIC_IP\"]\n\t\tif rtype == \"AAAA\" and value == \"local\":\n\t\t\tif \"PUBLIC_IPV6\" in env:\n\t\t\t\tvalue = env[\"PUBLIC_IPV6\"]\n\t\t\telse:\n\t\t\t\tcontinue\n\t\trecords.append((qname, rtype, value, \"(Set by user.)\"))\n\n\t# Add A/AAAA defaults if not overridden by the user's custom settings (and not otherwise configured).\n\t# Any CNAME or A record on the qname overrides A and AAAA. But when we set the default A record,\n\t# we should not cause the default AAAA record to be skipped because it thinks a custom A record\n\t# was set. So set has_rec_base to a clone of the current set of DNS settings, and don't update\n\t# during this process.\n\thas_rec_base = list(records)\n\ta_expl = \"Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery.\" % domain\n\tif domain_properties[domain][\"auto\"]:\n\t\tif domain.startswith((\"ns1.\", \"ns2.\")): a_expl = False # omit from 'External DNS' page since this only applies if box is its own DNS server\n\t\tif domain.startswith(\"www.\"): a_expl = \"Optional. Sets the IP address that %s resolves to so that the box can provide a redirect to the parent domain.\" % domain\n\t\tif domain.startswith(\"mta-sts.\"): a_expl = \"Optional. MTA-STS Policy Host serving /.well-known/mta-sts.txt.\"\n\t\tif domain.startswith(\"autoconfig.\"): a_expl = \"Provides email configuration autodiscovery support for Thunderbird Autoconfig.\"\n\t\tif domain.startswith(\"autodiscover.\"): a_expl = \"Provides email configuration autodiscovery support for Z-Push ActiveSync Autodiscover.\"\n\tdefaults = [\n\t\t(None, \"A\", env[\"PUBLIC_IP\"], a_expl),\n\t\t(None, \"AAAA\", env.get('PUBLIC_IPV6'), \"Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)\" % domain),\n\t]\n\tfor qname, rtype, value, explanation in defaults:\n\t\tif value is None or value.strip() == \"\": continue # skip IPV6 if not set\n\t\tif not is_zone and qname == \"www\": continue # don't create any default 'www' subdomains on what are themselves subdomains\n\t\t# Set the default record, but not if:\n\t\t# (1) there is not a user-set record of the same type already\n\t\t# (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence\n\t\t# (2) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)\n\t\tif not has_rec(qname, rtype) and not has_rec(qname, \"CNAME\") and not has_rec(qname, \"A\"):\n\t\t\trecords.append((qname, rtype, value, explanation))\n\n\t# Don't pin the list of records that has_rec checks against anymore.\n\thas_rec_base = records\n\n\tif domain_properties[domain][\"mail\"]:\n\t\t# The MX record says where email for the domain should be delivered: Here!\n\t\tif not has_rec(None, \"MX\", prefix=\"10 \"):\n\t\t\trecords.append((None, \"MX\", \"10 %s.\" % env[\"PRIMARY_HOSTNAME\"], \"Required. Specifies the hostname (and priority) of the machine that handles @%s mail.\" % domain))\n\n\t\t# SPF record: Permit the box ('mx', see above) to send mail on behalf of\n\t\t# the domain, and no one else.\n\t\t# Skip if the user has set a custom SPF record.\n\t\tif not has_rec(None, \"TXT\", prefix=\"v=spf1 \"):\n\t\t\trecords.append((None, \"TXT\", 'v=spf1 mx -all', \"Recommended. Specifies that only the box is permitted to send @%s mail.\" % domain))\n\n\t\t# Append the DKIM TXT record to the zone as generated by OpenDKIM.\n\t\t# Skip if the user has set a DKIM record already.\n\t\topendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')\n\t\twith open(opendkim_record_file, encoding=\"utf-8\") as orf:\n\t\t\tm = re.match(r'(\\S+)\\s+IN\\s+TXT\\s+\\( ((?:\"[^\"]+\"\\s+)+)\\)', orf.read(), re.S)\n\t\t\tval = \"\".join(re.findall(r'\"([^\"]+)\"', m.group(2)))\n\t\t\tif not has_rec(m.group(1), \"TXT\", prefix=\"v=DKIM1; \"):\n\t\t\t\trecords.append((m.group(1), \"TXT\", val, \"Recommended. Provides a way for recipients to verify that this machine sent @%s mail.\" % domain))\n\n\t\t# Append a DMARC record.\n\t\t# Skip if the user has set a DMARC record already.\n\t\tif not has_rec(\"_dmarc\", \"TXT\", prefix=\"v=DMARC1; \"):\n\t\t\trecords.append((\"_dmarc\", \"TXT\", 'v=DMARC1; p=quarantine;', \"Recommended. Specifies that mail that does not originate from the box but claims to be from @%s or which does not have a valid DKIM signature is suspect and should be quarantined by the recipient's mail system.\" % domain))\n\n\tif domain_properties[domain][\"user\"]:\n\t\t# Add CardDAV/CalDAV SRV records on the non-primary hostname that points to the primary hostname\n\t\t# for autoconfiguration of mail clients (so only domains hosting user accounts need it).\n\t\t# The SRV record format is priority (0, whatever), weight (0, whatever), port, service provider hostname (w/ trailing dot).\n\t\tif domain != env[\"PRIMARY_HOSTNAME\"]:\n\t\t\tfor dav in (\"card\", \"cal\"):\n\t\t\t\tqname = \"_\" + dav + \"davs._tcp\"\n\t\t\t\tif not has_rec(qname, \"SRV\"):\n\t\t\t\t\trecords.append((qname, \"SRV\", \"0 0 443 \" + env[\"PRIMARY_HOSTNAME\"] + \".\", \"Recommended. Specifies the hostname of the server that handles CardDAV/CalDAV services for email addresses on this domain.\"))\n\n\t# If this is a domain name that there are email addresses configured for, i.e. \"something@\"\n\t# this domain name, then the domain name is a MTA-STS (https://tools.ietf.org/html/rfc8461)\n\t# Policy Domain.\n\t#\n\t# A \"_mta-sts\" TXT record signals the presence of a MTA-STS policy. The id field helps clients\n\t# cache the policy. It should be stable so we don't update DNS unnecessarily but change when\n\t# the policy changes. It must be at most 32 letters and numbers, so we compute a hash of the\n\t# policy file.\n\t#\n\t# The policy itself is served at the \"mta-sts\" (no underscore) subdomain over HTTPS. Therefore\n\t# the TLS certificate used by Postfix for STARTTLS must be a valid certificate for the MX\n\t# domain name (PRIMARY_HOSTNAME) *and* the TLS certificate used by nginx for HTTPS on the mta-sts\n\t# subdomain must be valid certificate for that domain. Do not set an MTA-STS policy if either\n\t# certificate in use is not valid (e.g. because it is self-signed and a valid certificate has not\n\t# yet been provisioned). Since we cannot provision a certificate without A/AAAA records, we\n\t# always set them (by including them in the www domains) --- only the TXT records depend on there\n\t# being valid certificates.\n\tmta_sts_records = [ ]\n\tif domain_properties[domain][\"mail\"] \\\n\t and domain_properties[env[\"PRIMARY_HOSTNAME\"]][\"certificate-is-valid\"] \\\n\t and is_domain_cert_signed_and_valid(\"mta-sts.\" + domain, env):\n\t\t# Compute an up-to-32-character hash of the policy file. We'll take a SHA-1 hash of the policy\n\t\t# file (20 bytes) and encode it as base-64 (28 bytes, using alphanumeric alternate characters\n\t\t# instead of '+' and '/' which are not allowed in an MTA-STS policy id) but then just take its\n\t\t# first 20 characters, which is more than sufficient to change whenever the policy file changes\n\t\t# (and ensures any '=' padding at the end of the base64 encoding is dropped).\n\t\twith open(\"/var/lib/mailinabox/mta-sts.txt\", \"rb\") as f:\n\t\t\tmta_sts_policy_id = base64.b64encode(hashlib.sha1(f.read()).digest(), altchars=b\"AA\").decode(\"ascii\")[0:20]\n\t\tmta_sts_records.extend([\n\t\t\t(\"_mta-sts\", \"TXT\", \"v=STSv1; id=\" + mta_sts_policy_id, \"Optional. Part of the MTA-STS policy for incoming mail. If set, a MTA-STS policy must also be published.\")\n\t\t])\n\n\t\t# Enable SMTP TLS reporting (https://tools.ietf.org/html/rfc8460) if the user has set a config option.\n\t\t# Skip if the rules below if the user has set a custom _smtp._tls record.\n\t\tif env.get(\"MTA_STS_TLSRPT_RUA\") and not has_rec(\"_smtp._tls\", \"TXT\", prefix=\"v=TLSRPTv1;\"):\n\t\t\tmta_sts_records.append((\"_smtp._tls\", \"TXT\", \"v=TLSRPTv1; rua=\" + env[\"MTA_STS_TLSRPT_RUA\"], \"Optional. Enables MTA-STS reporting.\"))\n\tfor qname, rtype, value, explanation in mta_sts_records:\n\t\tif not has_rec(qname, rtype):\n\t\t\trecords.append((qname, rtype, value, explanation))\n\n\t# Add no-mail-here records for any qname that has an A or AAAA record\n\t# but no MX record. This would include domain itself if domain is a\n\t# non-mail domain and also may include qnames from custom DNS records.\n\t# Do this once at the end of generating a zone.\n\tif is_zone:\n\t\tqnames_with_a = {qname for (qname, rtype, value, explanation) in records if rtype in {\"A\", \"AAAA\"}}\n\t\tqnames_with_mx = {qname for (qname, rtype, value, explanation) in records if rtype == \"MX\"}\n\t\tfor qname in qnames_with_a - qnames_with_mx:\n\t\t\t# Mark this domain as not sending mail with hard-fail SPF and DMARC records.\n\t\t\td = (qname+\".\" if qname else \"\") + domain\n\t\t\tif not has_rec(qname, \"TXT\", prefix=\"v=spf1 \"):\n\t\t\t\trecords.append((qname, \"TXT\", 'v=spf1 -all', \"Recommended. Prevents use of this domain name for outbound mail by specifying that no servers are valid sources for mail from @%s. If you do send email from this domain name you should either override this record such that the SPF rule does allow the originating server, or, take the recommended approach and have the box handle mail for this domain (simply add any receiving alias at this domain name to make this machine treat the domain name as one of its mail domains).\" % d))\n\t\t\tif not has_rec(\"_dmarc\" + (\".\"+qname if qname else \"\"), \"TXT\", prefix=\"v=DMARC1; \"):\n\t\t\t\trecords.append((\"_dmarc\" + (\".\"+qname if qname else \"\"), \"TXT\", 'v=DMARC1; p=reject;', \"Recommended. Prevents use of this domain name for outbound mail by specifying that the SPF rule should be honoured for mail from @%s.\" % d))\n\n\t\t\t# And with a null MX record (https://explained-from-first-principles.com/email/#null-mx-record)\n\t\t\tif not has_rec(qname, \"MX\"):\n\t\t\t\trecords.append((qname, \"MX\", '0 .', \"Recommended. Prevents use of this domain name for incoming mail.\"))\n\n\t# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.\n\trecords.sort(key = lambda rec : list(reversed(rec[0].split(\".\")) if rec[0] is not None else \"\"))\n\n\treturn records\n\ndef is_domain_cert_signed_and_valid(domain, env):\n\tcert = get_ssl_certificates(env).get(domain)\n\tif not cert: return False # no certificate provisioned\n\tcert_status = check_certificate(domain, cert['certificate'], cert['private-key'])\n\treturn cert_status[0] == 'OK'\n\n########################################################################\n\ndef build_tlsa_record(env):\n\t# A DANE TLSA record in DNS specifies that connections on a port\n\t# must use TLS and the certificate must match a particular criteria.\n\t#\n\t# Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html\n\t# and https://community.letsencrypt.org/t/please-avoid-3-0-1-and-3-0-2-dane-tlsa-records-with-le-certificates/7022\n\t# for explaining all of this! Also see https://tools.ietf.org/html/rfc6698#section-2.1\n\t# and https://github.com/mail-in-a-box/mailinabox/issues/268#issuecomment-167160243.\n\t#\n\t# There are several criteria. We used to use \"3 0 1\" criteria, which\n\t# meant to pin a leaf (3) certificate (0) with SHA256 hash (1). But\n\t# certificates change, and especially as we move to short-lived certs\n\t# they change often. The TLSA record handily supports the criteria of\n\t# a leaf certificate (3)'s subject public key (1) with SHA256 hash (1).\n\t# The subject public key is the public key portion of the private key\n\t# that generated the CSR that generated the certificate. Since we\n\t# generate a private key once the first time Mail-in-a-Box is set up\n\t# and reuse it for all subsequent certificates, the TLSA record will\n\t# remain valid indefinitely.\n\n\tfrom ssl_certificates import load_cert_chain, load_pem\n\tfrom cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n\n\tfn = os.path.join(env[\"STORAGE_ROOT\"], \"ssl\", \"ssl_certificate.pem\")\n\tcert = load_pem(load_cert_chain(fn)[0])\n\n\tsubject_public_key = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)\n\t# We could have also loaded ssl_private_key.pem and called priv_key.public_key().public_bytes(...)\n\n\tpk_hash = hashlib.sha256(subject_public_key).hexdigest()\n\n\t# Specify the TLSA parameters:\n\t# 3: Match the (leaf) certificate. (No CA, no trust path needed.)\n\t# 1: Match its subject public key.\n\t# 1: Use SHA256.\n\treturn \"3 1 1 \" + pk_hash\n\ndef build_sshfp_records():\n\t# The SSHFP record is a way for us to embed this server's SSH public\n\t# key fingerprint into the DNS so that remote hosts have an out-of-band\n\t# method to confirm the fingerprint. See RFC 4255 and RFC 6594. This\n\t# depends on DNSSEC.\n\t#\n\t# On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to\n\t# include this info in the key verification prompt or 'yes' to trust\n\t# the SSHFP record.\n\t#\n\t# See https://github.com/xelerance/sshfp for inspiriation.\n\n\talgorithm_number = {\n\t\t\"ssh-rsa\": 1,\n\t\t\"ssh-dss\": 2,\n\t\t\"ecdsa-sha2-nistp256\": 3,\n\t\t\"ssh-ed25519\": 4,\n\t}\n\n\t# Get our local fingerprints by running ssh-keyscan. The output looks\n\t# like the known_hosts file: hostname, keytype, fingerprint. The order\n\t# of the output is arbitrary, so sort it to prevent spurious updates\n\t# to the zone file (that trigger bumping the serial number). However,\n\t# if SSH has been configured to listen on a nonstandard port, we must\n\t# specify that port to sshkeyscan.\n\n\tport = get_ssh_port()\n\n\t# If nothing returned, SSH is probably not installed.\n\tif not port:\n\t\treturn\n\n\tkeys = shell(\"check_output\", [\"ssh-keyscan\", \"-4\", \"-t\", \"rsa,dsa,ecdsa,ed25519\", \"-p\", str(port), \"localhost\"])\n\tkeys = sorted(keys.split(\"\\n\"))\n\n\tfor key in keys:\n\t\tif key.strip() == \"\" or key[0] == \"#\": continue\n\t\ttry:\n\t\t\t_host, keytype, pubkey = key.split(\" \")\n\t\t\tyield \"%d %d ( %s )\" % (\n\t\t\t\talgorithm_number[keytype],\n\t\t\t\t2, # specifies we are using SHA-256 on next line\n\t\t\t\thashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),\n\t\t\t\t)\n\t\texcept:\n\t\t\t# Lots of things can go wrong. Don't let it disturb the DNS\n\t\t\t# zone.\n\t\t\tpass\n\n########################################################################\n\ndef write_nsd_zone(domain, zonefile, records, env, force):\n\t# On the $ORIGIN line, there's typically a ';' comment at the end explaining\n\t# what the $ORIGIN line does. Any further data after the domain confuses\n\t# ldns-signzone, however. It used to say '; default zone domain'.\n\t#\n\t# The SOA contact address for all of the domains on this system is hostmaster\n\t# @ the PRIMARY_HOSTNAME. Hopefully that's legit.\n\t#\n\t# For the refresh through TTL fields, a good reference is:\n\t# https://www.ripe.net/publications/docs/ripe-203\n\t#\n\t# A hash of the available DNSSEC keys are added in a comment so that when\n\t# the keys change we force a re-generation of the zone which triggers\n\t# re-signing it.\n\n\tzone = \"\"\"\n$ORIGIN {domain}.\n$TTL 86400 ; default time to live\n\n@ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. (\n __SERIAL__ ; serial number\n 7200 ; Refresh (secondary nameserver update interval)\n 3600 ; Retry (when refresh fails, how often to try again, should be lower than the refresh)\n 1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)\n 86400 ; Negative TTL (how long negative responses are cached)\n )\n\"\"\"\n\n\t# Replace replacement strings.\n\tzone = zone.format(domain=domain, primary_domain=env[\"PRIMARY_HOSTNAME\"])\n\n\t# Add records.\n\tfor subdomain, querytype, value, _explanation in records:\n\t\tif subdomain:\n\t\t\tzone += subdomain\n\t\tzone += \"\\tIN\\t\" + querytype + \"\\t\"\n\t\tif querytype == \"TXT\":\n\t\t\t# Divide into 255-byte max substrings.\n\t\t\tv2 = \"\"\n\t\t\twhile len(value) > 0:\n\t\t\t\ts = value[0:255]\n\t\t\t\tvalue = value[255:]\n\t\t\t\ts = s.replace('\\\\', '\\\\\\\\') # escape backslashes\n\t\t\t\ts = s.replace('\"', '\\\\\"') # escape quotes\n\t\t\t\ts = '\"' + s + '\"' # wrap in quotes\n\t\t\t\tv2 += s + \" \"\n\t\t\tvalue = v2\n\t\tzone += value + \"\\n\"\n\n\t# Append a stable hash of DNSSEC signing keys in a comment.\n\tzone += f\"\\n; DNSSEC signing keys hash: {hash_dnssec_keys(domain, env)}\\n\"\n\n\t# DNSSEC requires re-signing a zone periodically. That requires\n\t# bumping the serial number even if no other records have changed.\n\t# We don't see the DNSSEC records yet, so we have to figure out\n\t# if a re-signing is necessary so we can prematurely bump the\n\t# serial number.\n\tforce_bump = False\n\tif not os.path.exists(zonefile + \".signed\"):\n\t\t# No signed file yet. Shouldn't normally happen unless a box\n\t\t# is going from not using DNSSEC to using DNSSEC.\n\t\tforce_bump = True\n\telse:\n\t\t# We've signed the domain. Check if we are close to the expiration\n\t\t# time of the signature. If so, we'll force a bump of the serial\n\t\t# number so we can re-sign it.\n\t\twith open(zonefile + \".signed\", encoding=\"utf-8\") as f:\n\t\t\tsigned_zone = f.read()\n\t\texpiration_times = re.findall(r\"\\sRRSIG\\s+SOA\\s+\\d+\\s+\\d+\\s\\d+\\s+(\\d{14})\", signed_zone)\n\t\tif len(expiration_times) == 0:\n\t\t\t# weird\n\t\t\tforce_bump = True\n\t\telse:\n\t\t\t# All of the times should be the same, but if not choose the soonest.\n\t\t\texpiration_time = min(expiration_times)\n\t\t\texpiration_time = datetime.datetime.strptime(expiration_time, \"%Y%m%d%H%M%S\")\n\t\t\tif expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):\n\t\t\t\t# We're within three days of the expiration, so bump serial & resign.\n\t\t\t\tforce_bump = True\n\n\t# Set the serial number.\n\tserial = datetime.datetime.now().strftime(\"%Y%m%d00\")\n\tif os.path.exists(zonefile):\n\t\t# If the zone already exists, is different, and has a later serial number,\n\t\t# increment the number.\n\t\twith open(zonefile, encoding=\"utf-8\") as f:\n\t\t\texisting_zone = f.read()\n\t\t\tm = re.search(r\"(\\d+)\\s*;\\s*serial number\", existing_zone)\n\t\t\tif m:\n\t\t\t\t# Clear out the serial number in the existing zone file for the\n\t\t\t\t# purposes of seeing if anything *else* in the zone has changed.\n\t\t\t\texisting_serial = m.group(1)\n\t\t\t\texisting_zone = existing_zone.replace(m.group(0), \"__SERIAL__ ; serial number\")\n\n\t\t\t\t# If the existing zone is the same as the new zone (modulo the serial number),\n\t\t\t\t# there is no need to update the file. Unless we're forcing a bump.\n\t\t\t\tif zone == existing_zone and not force_bump and not force:\n\t\t\t\t\treturn False\n\n\t\t\t\t# If the existing serial is not less than a serial number\n\t\t\t\t# based on the current date plus 00, increment it. Otherwise,\n\t\t\t\t# the serial number is less than our desired new serial number\n\t\t\t\t# so we'll use the desired new number.\n\t\t\t\tif existing_serial >= serial:\n\t\t\t\t\tserial = str(int(existing_serial) + 1)\n\n\tzone = zone.replace(\"__SERIAL__\", serial)\n\n\t# Write the zone file.\n\twith open(zonefile, \"w\", encoding=\"utf-8\") as f:\n\t\tf.write(zone)\n\n\treturn True # file is updated\n\ndef get_dns_zonefile(zone, env):\n\tfor domain, fn in get_dns_zones(env):\n\t\tif zone == domain:\n\t\t\tbreak\n\telse:\n\t\traise ValueError(\"%s is not a domain name that corresponds to a zone.\" % zone)\n\n\tnsd_zonefile = \"/etc/nsd/zones/\" + fn\n\twith open(nsd_zonefile, encoding=\"utf-8\") as f:\n\t\treturn f.read()\n\n########################################################################\n\ndef write_nsd_conf(zonefiles, additional_records, env):\n\t# Write the list of zones to a configuration file.\n\tnsd_conf_file = \"/etc/nsd/nsd.conf.d/zones.conf\"\n\tnsdconf = \"\"\n\n\t# Append the zones.\n\tfor domain, zonefile in zonefiles:\n\t\tnsdconf += f\"\"\"\nzone:\n\tname: {domain}\n\tzonefile: {zonefile}\n\"\"\"\n\n\t\t# If custom secondary nameservers have been set, allow zone transfers\n\t\t# and, if not a subnet, notifies to them.\n\t\tfor ipaddr in get_secondary_dns(additional_records, mode=\"xfr\"):\n\t\t\tif \"/\" not in ipaddr:\n\t\t\t\tnsdconf += \"\\n\\tnotify: %s NOKEY\" % (ipaddr)\n\t\t\tnsdconf += \"\\n\\tprovide-xfr: %s NOKEY\\n\" % (ipaddr)\n\n\t# Check if the file is changing. If it isn't changing,\n\t# return False to flag that no change was made.\n\tif os.path.exists(nsd_conf_file):\n\t\twith open(nsd_conf_file, encoding=\"utf-8\") as f:\n\t\t\tif f.read() == nsdconf:\n\t\t\t\treturn False\n\n\t# Write out new contents and return True to signal that\n\t# configuration changed.\n\twith open(nsd_conf_file, \"w\", encoding=\"utf-8\") as f:\n\t\tf.write(nsdconf)\n\treturn True\n\n########################################################################\n\ndef find_dnssec_signing_keys(domain, env):\n\t# For key that we generated (one per algorithm)...\n\td = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec')\n\tkeyconfs = [f for f in os.listdir(d) if f.endswith(\".conf\")]\n\tfor keyconf in keyconfs:\n\t\t# Load the file holding the KSK and ZSK key filenames.\n\t\tkeyconf_fn = os.path.join(d, keyconf)\n\t\tkeyinfo = load_env_vars_from_file(keyconf_fn)\n\n\t\t# Skip this key if the conf file has a setting named DOMAINS,\n\t\t# holding a comma-separated list of domain names, and if this\n\t\t# domain is not in the list. This allows easily disabling a\n\t\t# key by setting \"DOMAINS=\" or \"DOMAINS=none\", other than\n\t\t# deleting the key's .conf file, which might result in the key\n\t\t# being regenerated next upgrade. Keys should be disabled if\n\t\t# they are not needed to reduce the DNSSEC query response size.\n\t\tif \"DOMAINS\" in keyinfo and domain not in [dd.strip() for dd in keyinfo[\"DOMAINS\"].split(\",\")]:\n\t\t\tcontinue\n\n\t\tfor keytype in (\"KSK\", \"ZSK\"):\n\t\t\tyield keytype, keyinfo[keytype]\n\ndef hash_dnssec_keys(domain, env):\n\t# Create a stable (by sorting the items) hash of all of the private keys\n\t# that will be used to sign this domain.\n\tkeydata = []\n\tfor keytype, keyfn in sorted(find_dnssec_signing_keys(domain, env)):\n\t\toldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + \".private\")\n\t\tkeydata.extend((keytype, keyfn))\n\t\twith open(oldkeyfn, encoding=\"utf-8\") as fr:\n\t\t\tkeydata.append( fr.read() )\n\tkeydata = \"\".join(keydata).encode(\"utf8\")\n\treturn hashlib.sha1(keydata).hexdigest()\n\ndef sign_zone(domain, zonefile, env):\n\t# Sign the zone with all of the keys that were generated during\n\t# setup so that the user can choose which to use in their DS record at\n\t# their registrar, and also to support migration to newer algorithms.\n\n\t# In order to use the key files generated at setup which are for\n\t# the domain _domain_, we have to re-write the files and place\n\t# the actual domain name in it, so that ldns-signzone works.\n\t#\n\t# Patch each key, storing the patched version in /tmp for now.\n\t# Each key has a .key and .private file. Collect a list of filenames\n\t# for all of the keys (and separately just the key-signing keys).\n\tall_keys = []\n\tksk_keys = []\n\tfor keytype, keyfn in find_dnssec_signing_keys(domain, env):\n\t\tnewkeyfn = '/tmp/' + keyfn.replace(\"_domain_\", domain)\n\n\t\tfor ext in (\".private\", \".key\"):\n\t\t\t# Copy the .key and .private files to /tmp to patch them up.\n\t\t\t#\n\t\t\t# Use os.umask and open().write() to securely create a copy that only\n\t\t\t# we (root) can read.\n\t\t\toldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec', keyfn + ext)\n\t\t\twith open(oldkeyfn, encoding=\"utf-8\") as fr:\n\t\t\t\tkeydata = fr.read()\n\t\t\tkeydata = keydata.replace(\"_domain_\", domain)\n\t\t\tprev_umask = os.umask(0o77) # ensure written file is not world-readable\n\t\t\ttry:\n\t\t\t\twith open(newkeyfn + ext, \"w\", encoding=\"utf-8\") as fw:\n\t\t\t\t\tfw.write(keydata)\n\t\t\tfinally:\n\t\t\t\tos.umask(prev_umask) # other files we write should be world-readable\n\n\t\t# Put the patched key filename base (without extension) into the list of keys we'll sign with.\n\t\tall_keys.append(newkeyfn)\n\t\tif keytype == \"KSK\": ksk_keys.append(newkeyfn)\n\n\t# Do the signing.\n\texpiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime(\"%Y%m%d\")\n\tshell('check_call', [\"/usr/bin/ldns-signzone\",\n\t\t# expire the zone after 30 days\n\t\t\"-e\", expiry_date,\n\n\t\t# use NSEC3\n\t\t\"-n\",\n\n\t\t# zonefile to sign\n\t\t\"/etc/nsd/zones/\" + zonefile,\n\t]\n\t\t# keys to sign with (order doesn't matter -- it'll figure it out)\n\t\t+ all_keys\n\t)\n\n\t# Create a DS record based on the patched-up key files. The DS record is specific to the\n\t# zone being signed, so we can't use the .ds files generated when we created the keys.\n\t# The DS record points to the KSK only. Write this next to the zone file so we can\n\t# get it later to give to the user with instructions on what to do with it.\n\t#\n\t# Generate a DS record for each key. There are also several possible hash algorithms that may\n\t# be used, so we'll pre-generate all for each key. One DS record per line. Only one\n\t# needs to actually be deployed at the registrar. We'll select the preferred one\n\t# in the status checks.\n\twith open(\"/etc/nsd/zones/\" + zonefile + \".ds\", \"w\", encoding=\"utf-8\") as f:\n\t\tfor key in ksk_keys:\n\t\t\tfor digest_type in ('1', '2', '4'):\n\t\t\t\trr_ds = shell('check_output', [\"/usr/bin/ldns-key2ds\",\n\t\t\t\t\t\"-n\", # output to stdout\n\t\t\t\t\t\"-\" + digest_type, # 1=SHA1, 2=SHA256, 4=SHA384\n\t\t\t\t\tkey + \".key\"\n\t\t\t\t])\n\t\t\t\tf.write(rr_ds)\n\n\t# Remove the temporary patched key files.\n\tfor fn in all_keys:\n\t\tos.unlink(fn + \".private\")\n\t\tos.unlink(fn + \".key\")\n\n########################################################################\n\ndef write_opendkim_tables(domains, env):\n\t# Append a record to OpenDKIM's KeyTable and SigningTable for each domain\n\t# that we send mail from (zones and all subdomains).\n\n\topendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')\n\n\tif not os.path.exists(opendkim_key_file):\n\t\t# Looks like OpenDKIM is not installed.\n\t\treturn False\n\n\tconfig = {\n\t\t# The SigningTable maps email addresses to a key in the KeyTable that\n\t\t# specifies signing information for matching email addresses. Here we\n\t\t# map each domain to a same-named key.\n\t\t#\n\t\t# Elsewhere we set the DMARC policy for each domain such that mail claiming\n\t\t# to be From: the domain must be signed with a DKIM key on the same domain.\n\t\t# So we must have a separate KeyTable entry for each domain.\n\t\t\"SigningTable\":\n\t\t\t\"\".join(\n\t\t\t\tf\"*@{domain} {domain}\\n\"\n\t\t\t\tfor domain in domains\n\t\t\t),\n\n\t\t# The KeyTable specifies the signing domain, the DKIM selector, and the\n\t\t# path to the private key to use for signing some mail. Per DMARC, the\n\t\t# signing domain must match the sender's From: domain.\n\t\t\"KeyTable\":\n\t\t\t\"\".join(\n\t\t\t\tf\"{domain} {domain}:mail:{opendkim_key_file}\\n\"\n\t\t\t\tfor domain in domains\n\t\t\t),\n\t}\n\n\tdid_update = False\n\tfor filename, content in config.items():\n\t\t# Don't write the file if it doesn't need an update.\n\t\tif os.path.exists(\"/etc/opendkim/\" + filename):\n\t\t\twith open(\"/etc/opendkim/\" + filename, encoding=\"utf-8\") as f:\n\t\t\t\tif f.read() == content:\n\t\t\t\t\tcontinue\n\n\t\t# The contents needs to change.\n\t\twith open(\"/etc/opendkim/\" + filename, \"w\", encoding=\"utf-8\") as f:\n\t\t\tf.write(content)\n\t\tdid_update = True\n\n\t# Return whether the files changed. If they didn't change, there's\n\t# no need to kick the opendkim process.\n\treturn did_update\n\n########################################################################\n\ndef get_custom_dns_config(env, only_real_records=False):\n\ttry:\n\t\twith open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), encoding=\"utf-8\") as f:\n\t\t\tcustom_dns = rtyaml.load(f)\n\t\tif not isinstance(custom_dns, dict): raise ValueError # caught below\n\texcept:\n\t\treturn [ ]\n\n\tfor qname, value in custom_dns.items():\n\t\tif qname == \"_secondary_nameserver\" and only_real_records: continue # skip fake record\n\n\t\t# Short form. Mapping a domain name to a string is short-hand\n\t\t# for creating A records.\n\t\tif isinstance(value, str):\n\t\t\tvalues = [(\"A\", value)]\n\n\t\t# A mapping creates multiple records.\n\t\telif isinstance(value, dict):\n\t\t\tvalues = value.items()\n\n\t\t# No other type of data is allowed.\n\t\telse:\n\t\t\traise ValueError\n\n\t\tfor rtype, value2 in values:\n\t\t\tif isinstance(value2, str):\n\t\t\t\tyield (qname, rtype, value2)\n\t\t\telif isinstance(value2, list):\n\t\t\t\tfor value3 in value2:\n\t\t\t\t\tyield (qname, rtype, value3)\n\t\t\t# No other type of data is allowed.\n\t\t\telse:\n\t\t\t\traise ValueError\n\ndef filter_custom_records(domain, custom_dns_iter):\n\tfor qname, rtype, value in custom_dns_iter:\n\t\t# We don't count the secondary nameserver config (if present) as a record - that would just be\n\t\t# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.\n\t\tif qname == \"_secondary_nameserver\": continue\n\n\t\t# Is this record for the domain or one of its subdomains?\n\t\t# If `domain` is None, return records for all domains.\n\t\tif domain is not None and qname != domain and not qname.endswith(\".\" + domain): continue\n\n\t\t# Turn the fully qualified domain name in the YAML file into\n\t\t# our short form (None => domain, or a relative QNAME) if\n\t\t# domain is not None.\n\t\tif domain is not None:\n\t\t\tqname = None if qname == domain else qname[0:len(qname) - len(\".\" + domain)]\n\n\t\tyield (qname, rtype, value)\n\ndef write_custom_dns_config(config, env):\n\t# We get a list of (qname, rtype, value) triples. Convert this into a\n\t# nice dictionary format for storage on disk.\n\tfrom collections import OrderedDict\n\tconfig = list(config)\n\tdns = OrderedDict()\n\tseen_qnames = set()\n\n\t# Process the qnames in the order we see them.\n\tfor qname in [rec[0] for rec in config]:\n\t\tif qname in seen_qnames: continue\n\t\tseen_qnames.add(qname)\n\n\t\trecords = [(rec[1], rec[2]) for rec in config if rec[0] == qname]\n\t\tif len(records) == 1 and records[0][0] == \"A\":\n\t\t\tdns[qname] = records[0][1]\n\t\telse:\n\t\t\tdns[qname] = OrderedDict()\n\t\t\tseen_rtypes = set()\n\n\t\t\t# Process the rtypes in the order we see them.\n\t\t\tfor rtype in [rec[0] for rec in records]:\n\t\t\t\tif rtype in seen_rtypes: continue\n\t\t\t\tseen_rtypes.add(rtype)\n\n\t\t\t\tvalues = [rec[1] for rec in records if rec[0] == rtype]\n\t\t\t\tif len(values) == 1:\n\t\t\t\t\tvalues = values[0]\n\t\t\t\tdns[qname][rtype] = values\n\n\t# Write.\n\tconfig_yaml = rtyaml.dump(dns)\n\twith open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), \"w\", encoding=\"utf-8\") as f:\n\t\tf.write(config_yaml)\n\ndef set_custom_dns_record(qname, rtype, value, action, env):\n\t# validate qname\n\tfor zone, _fn in get_dns_zones(env):\n\t\t# It must match a zone apex or be a subdomain of a zone\n\t\t# that we are otherwise hosting.\n\t\tif qname == zone or qname.endswith(\".\"+zone):\n\t\t\tbreak\n\telse:\n\t\t# No match.\n\t\tif qname != \"_secondary_nameserver\":\n\t\t\traise ValueError(\"%s is not a domain name or a subdomain of a domain name managed by this box.\" % qname)\n\n\t# validate rtype\n\trtype = rtype.upper()\n\tif value is not None and qname != \"_secondary_nameserver\":\n\t\tif not re.search(DOMAIN_RE, qname):\n\t\t\tmsg = \"Invalid name.\"\n\t\t\traise ValueError(msg)\n\n\t\tif rtype in {\"A\", \"AAAA\"}:\n\t\t\tif value != \"local\": # \"local\" is a special flag for us\n\t\t\t\tv = ipaddress.ip_address(value) # raises a ValueError if there's a problem\n\t\t\t\tif rtype == \"A\" and not isinstance(v, ipaddress.IPv4Address): raise ValueError(\"That's an IPv6 address.\")\n\t\t\t\tif rtype == \"AAAA\" and not isinstance(v, ipaddress.IPv6Address): raise ValueError(\"That's an IPv4 address.\")\n\t\telif rtype in {\"CNAME\", \"NS\"}:\n\t\t\tif rtype == \"NS\" and qname == zone:\n\t\t\t\tmsg = \"NS records can only be set for subdomains.\"\n\t\t\t\traise ValueError(msg)\n\n\t\t\t# ensure value has a trailing dot\n\t\t\tif not value.endswith(\".\"):\n\t\t\t\tvalue = value + \".\"\n\n\t\t\tif not re.search(DOMAIN_RE, value):\n\t\t\t\tmsg = \"Invalid value.\"\n\t\t\t\traise ValueError(msg)\n\t\telif rtype in {\"CNAME\", \"TXT\", \"SRV\", \"MX\", \"SSHFP\", \"CAA\"}:\n\t\t\t# anything goes\n\t\t\tpass\n\t\telse:\n\t\t\traise ValueError(\"Unknown record type '%s'.\" % rtype)\n\n\t# load existing config\n\tconfig = list(get_custom_dns_config(env))\n\n\t# update\n\tnewconfig = []\n\tmade_change = False\n\tneeds_add = True\n\tfor _qname, _rtype, _value in config:\n\t\tif action == \"add\":\n\t\t\tif (_qname, _rtype, _value) == (qname, rtype, value):\n\t\t\t\t# Record already exists. Bail.\n\t\t\t\treturn False\n\t\telif action == \"set\":\n\t\t\tif (_qname, _rtype) == (qname, rtype):\n\t\t\t\tif _value == value:\n\t\t\t\t\t# Flag that the record already exists, don't\n\t\t\t\t\t# need to add it.\n\t\t\t\t\tneeds_add = False\n\t\t\t\telse:\n\t\t\t\t\t# Drop any other values for this (qname, rtype).\n\t\t\t\t\tmade_change = True\n\t\t\t\t\tcontinue\n\t\telif action == \"remove\":\n\t\t\tif (_qname, _rtype, _value) == (qname, rtype, value):\n\t\t\t\t# Drop this record.\n\t\t\t\tmade_change = True\n\t\t\t\tcontinue\n\t\t\tif value is None and (_qname, _rtype) == (qname, rtype):\n\t\t\t\t# Drop all qname-rtype records.\n\t\t\t\tmade_change = True\n\t\t\t\tcontinue\n\t\telse:\n\t\t\traise ValueError(\"Invalid action: \" + action)\n\n\t\t# Preserve this record.\n\t\tnewconfig.append((_qname, _rtype, _value))\n\n\tif action in {\"add\", \"set\"} and needs_add and value is not None:\n\t\tnewconfig.append((qname, rtype, value))\n\t\tmade_change = True\n\n\tif made_change:\n\t\t# serialize & save\n\t\twrite_custom_dns_config(newconfig, env)\n\treturn made_change\n\n########################################################################\n\ndef get_secondary_dns(custom_dns, mode=None):\n\tresolver = dns.resolver.get_default_resolver()\n\tresolver.timeout = 10\n\tresolver.lifetime = 10\n\n\tvalues = []\n\tfor qname, _rtype, value in custom_dns:\n\t\tif qname != '_secondary_nameserver': continue\n\t\tfor hostname in value.split(\" \"):\n\t\t\thostname = hostname.strip()\n\t\t\tif mode is None:\n\t\t\t\t# Just return the setting.\n\t\t\t\tvalues.append(hostname)\n\t\t\t\tcontinue\n\n\t\t\t# If the entry starts with \"xfr:\" only include it in the zone transfer settings.\n\t\t\tif hostname.startswith(\"xfr:\"):\n\t\t\t\tif mode != \"xfr\": continue\n\t\t\t\thostname = hostname[4:]\n\n\t\t\t# If is a hostname, before including in zone xfr lines,\n\t\t\t# resolve to an IP address.\n\t\t\t# It may not resolve to IPv6, so don't throw an exception if it\n\t\t\t# doesn't. Skip the entry if there is a DNS error.\n\t\t\tif mode == \"xfr\":\n\t\t\t\ttry:\n\t\t\t\t\tipaddress.ip_interface(hostname) # test if it's an IP address or CIDR notation\n\t\t\t\t\tvalues.append(hostname)\n\t\t\t\texcept ValueError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresponse = dns.resolver.resolve(hostname+'.', \"A\", raise_on_no_answer=False)\n\t\t\t\t\t\tvalues.extend(map(str, response))\n\t\t\t\t\texcept dns.exception.DNSException:\n\t\t\t\t\t\tpass\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresponse = dns.resolver.resolve(hostname+'.', \"AAAA\", raise_on_no_answer=False)\n\t\t\t\t\t\tvalues.extend(map(str, response))\n\t\t\t\t\texcept dns.exception.DNSException:\n\t\t\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\tvalues.append(hostname)\n\n\treturn values\n\ndef set_secondary_dns(hostnames, env):\n\tif len(hostnames) > 0:\n\t\t# Validate that all hostnames are valid and that all zone-xfer IP addresses are valid.\n\t\tresolver = dns.resolver.get_default_resolver()\n\t\tresolver.timeout = 5\n\t\tresolver.lifetime = 5\n\n\t\tfor item in hostnames:\n\t\t\tif not item.startswith(\"xfr:\"):\n\t\t\t\t# Resolve hostname.\n\t\t\t\ttry:\n\t\t\t\t\tresolver.resolve(item, \"A\")\n\t\t\t\texcept (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tresolver.resolve(item, \"AAAA\")\n\t\t\t\t\texcept (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.Timeout):\n\t\t\t\t\t\traise ValueError(\"Could not resolve the IP address of %s.\" % item)\n\t\t\telse:\n\t\t\t\t# Validate IP address.\n\t\t\t\ttry:\n\t\t\t\t\tif \"/\" in item[4:]:\n\t\t\t\t\t\tipaddress.ip_network(item[4:]) # raises a ValueError if there's a problem\n\t\t\t\t\telse:\n\t\t\t\t\t\tipaddress.ip_address(item[4:]) # raises a ValueError if there's a problem\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ValueError(\"'%s' is not an IPv4 or IPv6 address or subnet.\" % item[4:])\n\n\t\t# Set.\n\t\tset_custom_dns_record(\"_secondary_nameserver\", \"A\", \" \".join(hostnames), \"set\", env)\n\telse:\n\t\t# Clear.\n\t\tset_custom_dns_record(\"_secondary_nameserver\", \"A\", None, \"set\", env)\n\n\t# Apply.\n\treturn do_dns_update(env)\n\n\ndef get_custom_dns_records(custom_dns, qname, rtype):\n\tfor qname1, rtype1, value in custom_dns:\n\t\tif qname1 == qname and rtype1 == rtype:\n\t\t\tyield value\n\n########################################################################\n\ndef build_recommended_dns(env):\n\tret = []\n\tfor (domain, _zonefile, records) in build_zones(env):\n\t\t# remove records that we don't display\n\t\trecords = [r for r in records if r[3] is not False]\n\n\t\t# put Required at the top, then Recommended, then everythiing else\n\t\trecords.sort(key = lambda r : 0 if r[3].startswith(\"Required.\") else (1 if r[3].startswith(\"Recommended.\") else 2))\n\n\t\t# expand qnames\n\t\tfor i in range(len(records)):\n\t\t\tqname = domain if records[i][0] is None else records[i][0] + \".\" + domain\n\n\t\t\trecords[i] = {\n\t\t\t\t\"qname\": qname,\n\t\t\t\t\"rtype\": records[i][1],\n\t\t\t\t\"value\": records[i][2],\n\t\t\t\t\"explanation\": records[i][3],\n\t\t\t}\n\n\t\t# return\n\t\tret.append((domain, records))\n\treturn ret\n\nif __name__ == \"__main__\":\n\tfrom utils import load_environment\n\tenv = load_environment()\n\tif sys.argv[-1] == \"--lint\":\n\t\twrite_custom_dns_config(get_custom_dns_config(env), env)\n\telse:\n\t\tfor _zone, records in build_recommended_dns(env):\n\t\t\tfor record in records:\n\t\t\t\tprint(\"; \" + record['explanation'])\n\t\t\t\tprint(record['qname'], record['rtype'], record['value'], sep=\"\\t\")\n\t\t\t\tprint()\n\n\n\nFile: management/mail_log.py\n\n#!/usr/local/lib/mailinabox/env/bin/python\nimport argparse\nimport datetime\nimport gzip\nimport os.path\nimport re\nimport shutil\nimport tempfile\nimport textwrap\nfrom collections import defaultdict, OrderedDict\n\nimport dateutil.parser\nimport time\n\nfrom dateutil.relativedelta import relativedelta\n\nimport utils\n\n\nLOG_FILES = (\n '/var/log/mail.log.6.gz',\n '/var/log/mail.log.5.gz',\n '/var/log/mail.log.4.gz',\n '/var/log/mail.log.3.gz',\n '/var/log/mail.log.2.gz',\n '/var/log/mail.log.1',\n '/var/log/mail.log',\n)\n\nTIME_DELTAS = OrderedDict([\n ('all', datetime.timedelta(weeks=52)),\n ('month', datetime.timedelta(weeks=4)),\n ('2weeks', datetime.timedelta(days=14)),\n ('week', datetime.timedelta(days=7)),\n ('2days', datetime.timedelta(days=2)),\n ('day', datetime.timedelta(days=1)),\n ('12hours', datetime.timedelta(hours=12)),\n ('6hours', datetime.timedelta(hours=6)),\n ('hour', datetime.timedelta(hours=1)),\n ('30min', datetime.timedelta(minutes=30)),\n ('10min', datetime.timedelta(minutes=10)),\n ('5min', datetime.timedelta(minutes=5)),\n ('min', datetime.timedelta(minutes=1)),\n ('today', datetime.datetime.now() - datetime.datetime.now().replace(hour=0, minute=0, second=0))\n])\n\nEND_DATE = NOW = datetime.datetime.now()\nSTART_DATE = None\n\nVERBOSE = False\n\n# List of strings to filter users with\nFILTERS = None\n\n# What to show (with defaults)\nSCAN_OUT = True # Outgoing email\nSCAN_IN = True # Incoming email\nSCAN_DOVECOT_LOGIN = True # Dovecot Logins\nSCAN_GREY = False # Greylisted email\nSCAN_BLOCKED = False # Rejected email\n\n\ndef scan_files(collector):\n \"\"\" Scan files until they run out or the earliest date is reached \"\"\"\n\n stop_scan = False\n\n for fn in LOG_FILES:\n\n tmp_file = None\n\n if not os.path.exists(fn):\n continue\n elif fn[-3:] == '.gz':\n tmp_file = tempfile.NamedTemporaryFile()\n with gzip.open(fn, 'rb') as f:\n shutil.copyfileobj(f, tmp_file)\n\n if VERBOSE:\n print(\"Processing file\", fn, \"...\")\n fn = tmp_file.name if tmp_file else fn\n\n for line in readline(fn):\n if scan_mail_log_line(line.strip(), collector) is False:\n if stop_scan:\n return\n stop_scan = True\n else:\n stop_scan = False\n\n\n\ndef scan_mail_log(env):\n \"\"\" Scan the system's mail log files and collect interesting data\n\n This function scans the 2 most recent mail log files in /var/log/.\n\n Args:\n env (dict): Dictionary containing MiaB settings\n\n \"\"\"\n\n collector = {\n \"scan_count\": 0, # Number of lines scanned\n \"parse_count\": 0, # Number of lines parsed (i.e. that had their contents examined)\n \"scan_time\": time.time(), # The time in seconds the scan took\n \"sent_mail\": OrderedDict(), # Data about email sent by users\n \"received_mail\": OrderedDict(), # Data about email received by users\n \"logins\": OrderedDict(), # Data about login activity\n \"postgrey\": {}, # Data about greylisting of email addresses\n \"rejected\": OrderedDict(), # Emails that were blocked\n \"known_addresses\": None, # Addresses handled by the Miab installation\n \"other-services\": set(),\n }\n\n try:\n import mailconfig\n collector[\"known_addresses\"] = (set(mailconfig.get_mail_users(env)) |\n {alias[0] for alias in mailconfig.get_mail_aliases(env)})\n except ImportError:\n pass\n\n print(f\"Scanning logs from {START_DATE:%Y-%m-%d %H:%M:%S} to {END_DATE:%Y-%m-%d %H:%M:%S}\"\n )\n\n # Scan the lines in the log files until the date goes out of range\n scan_files(collector)\n\n if not collector[\"scan_count\"]:\n print(\"No log lines scanned...\")\n return\n\n collector[\"scan_time\"] = time.time() - collector[\"scan_time\"]\n\n print(\"{scan_count} Log lines scanned, {parse_count} lines parsed in {scan_time:.2f} \"\n \"seconds\\n\".format(**collector))\n\n # Print Sent Mail report\n\n if collector[\"sent_mail\"]:\n msg = \"Sent email\"\n print_header(msg)\n\n data = OrderedDict(sorted(collector[\"sent_mail\"].items(), key=email_sort))\n\n print_user_table(\n data.keys(),\n data=[\n (\"sent\", [u[\"sent_count\"] for u in data.values()]),\n (\"hosts\", [len(u[\"hosts\"]) for u in data.values()]),\n ],\n sub_data=[\n (\"sending hosts\", [u[\"hosts\"] for u in data.values()]),\n ],\n activity=[\n (\"sent\", [u[\"activity-by-hour\"] for u in data.values()]),\n ],\n earliest=[u[\"earliest\"] for u in data.values()],\n latest=[u[\"latest\"] for u in data.values()],\n )\n\n accum = defaultdict(int)\n data = collector[\"sent_mail\"].values()\n\n for h in range(24):\n accum[h] = sum(d[\"activity-by-hour\"][h] for d in data)\n\n print_time_table(\n [\"sent\"],\n [accum]\n )\n\n # Print Received Mail report\n\n if collector[\"received_mail\"]:\n msg = \"Received email\"\n print_header(msg)\n\n data = OrderedDict(sorted(collector[\"received_mail\"].items(), key=email_sort))\n\n print_user_table(\n data.keys(),\n data=[\n (\"received\", [u[\"received_count\"] for u in data.values()]),\n ],\n activity=[\n (\"sent\", [u[\"activity-by-hour\"] for u in data.values()]),\n ],\n earliest=[u[\"earliest\"] for u in data.values()],\n latest=[u[\"latest\"] for u in data.values()],\n )\n\n accum = defaultdict(int)\n for h in range(24):\n accum[h] = sum(d[\"activity-by-hour\"][h] for d in data.values())\n\n print_time_table(\n [\"received\"],\n [accum]\n )\n\n # Print login report\n\n if collector[\"logins\"]:\n msg = \"User logins per hour\"\n print_header(msg)\n\n data = OrderedDict(sorted(collector[\"logins\"].items(), key=email_sort))\n\n # Get a list of all of the protocols seen in the logs in reverse count order.\n all_protocols = defaultdict(int)\n for u in data.values():\n for protocol_name, count in u[\"totals_by_protocol\"].items():\n all_protocols[protocol_name] += count\n all_protocols = [k for k, v in sorted(all_protocols.items(), key=lambda kv : -kv[1])]\n\n print_user_table(\n data.keys(),\n data=[\n (protocol_name, [\n round(u[\"totals_by_protocol\"][protocol_name] / (u[\"latest\"]-u[\"earliest\"]).total_seconds() * 60*60, 1)\n if (u[\"latest\"]-u[\"earliest\"]).total_seconds() > 0\n else 0 # prevent division by zero\n for u in data.values()])\n for protocol_name in all_protocols\n ],\n sub_data=[\n (\"Protocol and Source\", [[\n f\"{protocol_name} {host}: {count} times\"\n for (protocol_name, host), count\n in sorted(u[\"totals_by_protocol_and_host\"].items(), key=lambda kv:-kv[1])\n ] for u in data.values()])\n ],\n activity=[\n (protocol_name, [u[\"activity-by-hour\"][protocol_name] for u in data.values()])\n for protocol_name in all_protocols\n ],\n earliest=[u[\"earliest\"] for u in data.values()],\n latest=[u[\"latest\"] for u in data.values()],\n numstr=lambda n : str(round(n, 1)),\n )\n\n accum = { protocol_name: defaultdict(int) for protocol_name in all_protocols }\n for h in range(24):\n for protocol_name in all_protocols:\n accum[protocol_name][h] = sum(d[\"activity-by-hour\"][protocol_name][h] for d in data.values())\n\n print_time_table(\n all_protocols,\n [accum[protocol_name] for protocol_name in all_protocols]\n )\n\n if collector[\"postgrey\"]:\n msg = \"Greylisted Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}\"\n print_header(msg.format(START_DATE, END_DATE))\n\n print(textwrap.fill(\n \"The following mail was greylisted, meaning the emails were temporarily rejected. \"\n \"Legitimate senders must try again after three minutes.\",\n width=80, initial_indent=\" \", subsequent_indent=\" \"\n ), end='\\n\\n')\n\n data = OrderedDict(sorted(collector[\"postgrey\"].items(), key=email_sort))\n users = []\n received = []\n senders = []\n sender_clients = []\n delivered_dates = []\n\n for recipient in data:\n sorted_recipients = sorted(data[recipient].items(), key=lambda kv: kv[1][0] or kv[1][1])\n for (client_address, sender), (first_date, delivered_date) in sorted_recipients:\n if first_date:\n users.append(recipient)\n received.append(first_date)\n senders.append(sender)\n delivered_dates.append(delivered_date)\n sender_clients.append(client_address)\n\n print_user_table(\n users,\n data=[\n (\"received\", received),\n (\"sender\", senders),\n (\"delivered\", [str(d) or \"no retry yet\" for d in delivered_dates]),\n (\"sending host\", sender_clients)\n ],\n delimit=True,\n )\n\n if collector[\"rejected\"]:\n msg = \"Blocked Email {:%Y-%m-%d %H:%M:%S} and {:%Y-%m-%d %H:%M:%S}\"\n print_header(msg.format(START_DATE, END_DATE))\n\n data = OrderedDict(sorted(collector[\"rejected\"].items(), key=email_sort))\n\n rejects = []\n\n if VERBOSE:\n for user_data in data.values():\n user_rejects = []\n for date, sender, message in user_data[\"blocked\"]:\n if len(sender) > 64:\n sender = sender[:32] + \"…\" + sender[-32:]\n user_rejects.extend((f'{date} - {sender} ', ' %s' % message))\n rejects.append(user_rejects)\n\n print_user_table(\n data.keys(),\n data=[\n (\"blocked\", [len(u[\"blocked\"]) for u in data.values()]),\n ],\n sub_data=[\n (\"blocked emails\", rejects),\n ],\n earliest=[u[\"earliest\"] for u in data.values()],\n latest=[u[\"latest\"] for u in data.values()],\n )\n\n if collector[\"other-services\"] and VERBOSE and False:\n print_header(\"Other services\")\n print(\"The following unknown services were found in the log file.\")\n print(\" \", *sorted(collector[\"other-services\"]), sep='\\n│ ')\n\n\ndef scan_mail_log_line(line, collector):\n \"\"\" Scan a log line and extract interesting data \"\"\"\n\n m = re.match(r\"(\\w+[\\s]+\\d+ \\d+:\\d+:\\d+) ([\\w]+ )?([\\w\\-/]+)[^:]*: (.*)\", line)\n\n if not m:\n return True\n\n date, _system, service, log = m.groups()\n collector[\"scan_count\"] += 1\n\n # print()\n # print(\"date:\", date)\n # print(\"host:\", system)\n # print(\"service:\", service)\n # print(\"log:\", log)\n\n # Replaced the dateutil parser for a less clever way of parser that is roughly 4 times faster.\n # date = dateutil.parser.parse(date)\n\n # strptime fails on Feb 29 with ValueError: day is out of range for month if correct year is not provided.\n # See https://bugs.python.org/issue26460\n date = datetime.datetime.strptime(str(NOW.year) + ' ' + date, '%Y %b %d %H:%M:%S')\n # if log date in future, step back a year\n if date > NOW:\n date = date.replace(year = NOW.year - 1)\n #print(\"date:\", date)\n\n # Check if the found date is within the time span we are scanning\n if date > END_DATE:\n # Don't process, and halt\n return False\n elif date < START_DATE:\n # Don't process, but continue\n return True\n\n if service == \"postfix/submission/smtpd\":\n if SCAN_OUT:\n scan_postfix_submission_line(date, log, collector)\n elif service == \"postfix/lmtp\":\n if SCAN_IN:\n scan_postfix_lmtp_line(date, log, collector)\n elif service.endswith(\"-login\"):\n if SCAN_DOVECOT_LOGIN:\n scan_dovecot_login_line(date, log, collector, service[:4])\n elif service == \"postgrey\":\n if SCAN_GREY:\n scan_postgrey_line(date, log, collector)\n elif service == \"postfix/smtpd\":\n if SCAN_BLOCKED:\n scan_postfix_smtpd_line(date, log, collector)\n elif service in {\"postfix/qmgr\", \"postfix/pickup\", \"postfix/cleanup\", \"postfix/scache\",\n \"spampd\", \"postfix/anvil\", \"postfix/master\", \"opendkim\", \"postfix/lmtp\",\n \"postfix/tlsmgr\", \"anvil\"}:\n # nothing to look at\n return True\n else:\n collector[\"other-services\"].add(service)\n return True\n\n collector[\"parse_count\"] += 1\n return True\n\n\ndef scan_postgrey_line(date, log, collector):\n \"\"\" Scan a postgrey log line and extract interesting data \"\"\"\n\n m = re.match(r\"action=(greylist|pass), reason=(.*?), (?:delay=\\d+, )?client_name=(.*), \"\n \"client_address=(.*), sender=(.*), recipient=(.*)\",\n log)\n\n if m:\n\n action, reason, client_name, client_address, sender, user = m.groups()\n\n if user_match(user):\n\n # Might be useful to group services that use a lot of mail different servers on sub\n # domains like 1.domein.com\n\n # if '.' in client_name:\n # addr = client_name.split('.')\n # if len(addr) > 2:\n # client_name = '.'.join(addr[1:])\n\n key = (client_address if client_name == 'unknown' else client_name, sender)\n\n rep = collector[\"postgrey\"].setdefault(user, {})\n\n if action == \"greylist\" and reason == \"new\":\n rep[key] = (date, rep[key][1] if key in rep else None)\n elif action == \"pass\":\n rep[key] = (rep[key][0] if key in rep else None, date)\n\n\ndef scan_postfix_smtpd_line(date, log, collector):\n \"\"\" Scan a postfix smtpd log line and extract interesting data \"\"\"\n\n # Check if the incoming mail was rejected\n\n m = re.match(\"NOQUEUE: reject: RCPT from .*?: (.*?); from=<(.*?)> to=<(.*?)>\", log)\n\n if m:\n message, sender, user = m.groups()\n\n # skip this, if reported in the greylisting report\n if \"Recipient address rejected: Greylisted\" in message:\n return\n\n # only log mail to known recipients\n if user_match(user) and (collector[\"known_addresses\"] is None or user in collector[\"known_addresses\"]):\n data = collector[\"rejected\"].get(\n user,\n {\n \"blocked\": [],\n \"earliest\": None,\n \"latest\": None,\n }\n )\n # simplify this one\n m = re.search(\n r\"Client host \\[(.*?)\\] blocked using zen.spamhaus.org; (.*)\", message\n )\n if m:\n message = \"ip blocked: \" + m.group(2)\n else:\n # simplify this one too\n m = re.search(\n r\"Sender address \\[.*@(.*)\\] blocked using dbl.spamhaus.org; (.*)\", message\n )\n if m:\n message = \"domain blocked: \" + m.group(2)\n\n if data[\"earliest\"] is None:\n data[\"earliest\"] = date\n data[\"latest\"] = date\n data[\"blocked\"].append((date, sender, message))\n\n collector[\"rejected\"][user] = data\n\n\ndef scan_dovecot_login_line(date, log, collector, protocol_name):\n \"\"\" Scan a dovecot login log line and extract interesting data \"\"\"\n\n m = re.match(\"Info: Login: user=<(.*?)>, method=PLAIN, rip=(.*?),\", log)\n\n if m:\n # TODO: CHECK DIT\n user, host = m.groups()\n\n if user_match(user):\n add_login(user, date, protocol_name, host, collector)\n\n\ndef add_login(user, date, protocol_name, host, collector):\n # Get the user data, or create it if the user is new\n data = collector[\"logins\"].get(\n user,\n {\n \"earliest\": None,\n \"latest\": None,\n \"totals_by_protocol\": defaultdict(int),\n \"totals_by_protocol_and_host\": defaultdict(int),\n \"activity-by-hour\": defaultdict(lambda : defaultdict(int)),\n }\n )\n\n if data[\"earliest\"] is None:\n data[\"earliest\"] = date\n data[\"latest\"] = date\n\n data[\"totals_by_protocol\"][protocol_name] += 1\n data[\"totals_by_protocol_and_host\"][(protocol_name, host)] += 1\n\n if host not in {\"127.0.0.1\", \"::1\"} or True:\n data[\"activity-by-hour\"][protocol_name][date.hour] += 1\n\n collector[\"logins\"][user] = data\n\n\ndef scan_postfix_lmtp_line(date, log, collector):\n \"\"\" Scan a postfix lmtp log line and extract interesting data\n\n It is assumed that every log of postfix/lmtp indicates an email that was successfully\n received by Postfix.\n\n \"\"\"\n\n m = re.match(r\"([A-Z0-9]+): to=<(\\S+)>, .* Saved\", log)\n\n if m:\n _, user = m.groups()\n\n if user_match(user):\n # Get the user data, or create it if the user is new\n data = collector[\"received_mail\"].get(\n user,\n {\n \"received_count\": 0,\n \"earliest\": None,\n \"latest\": None,\n \"activity-by-hour\": defaultdict(int),\n }\n )\n\n data[\"received_count\"] += 1\n data[\"activity-by-hour\"][date.hour] += 1\n\n if data[\"earliest\"] is None:\n data[\"earliest\"] = date\n data[\"latest\"] = date\n\n collector[\"received_mail\"][user] = data\n\n\ndef scan_postfix_submission_line(date, log, collector):\n \"\"\" Scan a postfix submission log line and extract interesting data\n\n Lines containing a sasl_method with the values PLAIN or LOGIN are assumed to indicate a sent\n email.\n\n \"\"\"\n\n # Match both the 'plain' and 'login' sasl methods, since both authentication methods are\n # allowed by Dovecot. Exclude trailing comma after the username when additional fields\n\t# follow after.\n m = re.match(r\"([A-Z0-9]+): client=(\\S+), sasl_method=(PLAIN|LOGIN), sasl_username=(\\S+)(?%d} \" % max(2, max_len)\n\n for i, d in enumerate(data):\n lines[i] += base.format(d[h])\n\n lines.insert(0, \"┬ totals by time of day:\")\n lines.append(\"└\" + (len(lines[-1]) - 2) * \"─\")\n\n if do_print:\n print(\"\\n\".join(lines))\n return None\n else:\n return lines\n\n\ndef print_user_table(users, data=None, sub_data=None, activity=None, latest=None, earliest=None,\n delimit=False, numstr=str):\n str_temp = \"{:<32} \"\n lines = []\n data = data or []\n\n col_widths = len(data) * [0]\n col_left = len(data) * [False]\n vert_pos = 0\n\n do_accum = all(isinstance(n, (int, float)) for _, d in data for n in d)\n data_accum = len(data) * ([0] if do_accum else [\" \"])\n\n last_user = None\n\n for row, user in enumerate(users):\n\n if delimit:\n if last_user and last_user != user:\n lines.append(len(lines[-1]) * \"…\")\n last_user = user\n\n line = \"{:<32} \".format(user[:31] + \"…\" if len(user) > 32 else user)\n\n for col, (l, d) in enumerate(data):\n if isinstance(d[row], str):\n col_str = str_temp.format(d[row][:31] + \"…\" if len(d[row]) > 32 else d[row])\n col_left[col] = True\n elif isinstance(d[row], datetime.datetime):\n col_str = f\"{d[row]!s:<20}\"\n col_left[col] = True\n else:\n temp = \"{:>%s}\" % max(5, len(l) + 1, len(str(d[row])) + 1)\n col_str = temp.format(str(d[row]))\n col_widths[col] = max(col_widths[col], len(col_str))\n line += col_str\n\n if do_accum:\n data_accum[col] += d[row]\n\n try:\n if None not in [latest, earliest]: # noqa PLR6201\n vert_pos = len(line)\n e = earliest[row]\n l = latest[row]\n timespan = relativedelta(l, e)\n if timespan.months:\n temp = \" │ {:0.1f} months\"\n line += temp.format(timespan.months + timespan.days / 30.0)\n elif timespan.days:\n temp = \" │ {:0.1f} days\"\n line += temp.format(timespan.days + timespan.hours / 24.0)\n elif (e.hour, e.minute) == (l.hour, l.minute):\n temp = \" │ {:%H:%M}\"\n line += temp.format(e)\n else:\n temp = \" │ {:%H:%M} - {:%H:%M}\"\n line += temp.format(e, l)\n\n except KeyError:\n pass\n\n lines.append(line.rstrip())\n\n try:\n if VERBOSE:\n if sub_data is not None:\n for l, d in sub_data:\n if d[row]:\n lines.extend(('┬', '│ %s' % l, '├─%s─' % (len(l) * '─'), '│'))\n max_len = 0\n for v in list(d[row]):\n lines.append(\"│ %s\" % v)\n max_len = max(max_len, len(v))\n lines.append(\"└\" + (max_len + 1) * \"─\")\n\n if activity is not None:\n lines.extend(print_time_table(\n [label for label, _ in activity],\n [data[row] for _, data in activity],\n do_print=False\n ))\n\n except KeyError:\n pass\n\n header = str_temp.format(\"\")\n\n for col, (l, _) in enumerate(data):\n if col_left[col]:\n header += l.ljust(max(5, len(l) + 1, col_widths[col]))\n else:\n header += l.rjust(max(5, len(l) + 1, col_widths[col]))\n\n if None not in [latest, earliest]: # noqa PLR6201\n header += \" │ timespan \"\n\n lines.insert(0, header.rstrip())\n\n table_width = max(len(l) for l in lines)\n t_line = table_width * \"─\"\n b_line = table_width * \"─\"\n\n if vert_pos:\n t_line = t_line[:vert_pos + 1] + \"┼\" + t_line[vert_pos + 2:]\n b_line = b_line[:vert_pos + 1] + (\"┬\" if VERBOSE else \"┼\") + b_line[vert_pos + 2:]\n\n lines.insert(1, t_line)\n lines.append(b_line)\n\n # Print totals\n\n data_accum = [numstr(a) for a in data_accum]\n footer = str_temp.format(\"Totals:\" if do_accum else \" \")\n for row, (l, _) in enumerate(data):\n temp = \"{:>%d}\" % max(5, len(l) + 1)\n footer += temp.format(data_accum[row])\n\n try:\n if None not in [latest, earliest]: # noqa PLR6201\n max_l = max(latest)\n min_e = min(earliest)\n timespan = relativedelta(max_l, min_e)\n if timespan.days:\n temp = \" │ {:0.2f} days\"\n footer += temp.format(timespan.days + timespan.hours / 24.0)\n elif (min_e.hour, min_e.minute) == (max_l.hour, max_l.minute):\n temp = \" │ {:%H:%M}\"\n footer += temp.format(min_e)\n else:\n temp = \" │ {:%H:%M} - {:%H:%M}\"\n footer += temp.format(min_e, max_l)\n\n except KeyError:\n pass\n\n lines.append(footer)\n\n print(\"\\n\".join(lines))\n\n\ndef print_header(msg):\n print('\\n' + msg)\n print(\"═\" * len(msg), '\\n')\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = utils.load_environment()\n except FileNotFoundError:\n env_vars = {}\n\n parser = argparse.ArgumentParser(\n description=\"Scan the mail log files for interesting data. By default, this script \"\n \"shows today's incoming and outgoing mail statistics. This script was (\"\n \"re)written for the Mail-in-a-box email server.\"\n \"https://github.com/mail-in-a-box/mailinabox\",\n add_help=False\n )\n\n # Switches to determine what to parse and what to ignore\n\n parser.add_argument(\"-r\", \"--received\", help=\"Scan for received emails.\",\n action=\"store_true\")\n parser.add_argument(\"-s\", \"--sent\", help=\"Scan for sent emails.\",\n action=\"store_true\")\n parser.add_argument(\"-l\", \"--logins\", help=\"Scan for user logins to IMAP/POP3.\",\n action=\"store_true\")\n parser.add_argument(\"-g\", \"--grey\", help=\"Scan for greylisted emails.\",\n action=\"store_true\")\n parser.add_argument(\"-b\", \"--blocked\", help=\"Scan for blocked emails.\",\n action=\"store_true\")\n\n parser.add_argument(\"-t\", \"--timespan\", choices=TIME_DELTAS.keys(), default='today',\n metavar='