\n\n\"\"\"\n \n\n myparser = MyHTMLParser()\n myparser.feed(page)\n\n\nif __name__ == '__main__':\n main()\n"},"repo_name":{"kind":"string","value":"ahbaid/learn"},"sub_path":{"kind":"string","value":"python/scae/class-08/html1.py"},"file_name":{"kind":"string","value":"html1.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":495,"string":"495"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":359,"cells":{"seq_id":{"kind":"string","value":"13658425408"},"text":{"kind":"string","value":"import numpy as np\nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\n\ndef summarize_qc_resamples(input_df, verbose=False, **resample_kwargs):\n\n time_list = list()\n data_list = list()\n\n for time, df in input_df.resample(**resample_kwargs):\n if verbose == True:\n print(\"Currently working on: {}\".format(time))\n time_list.append(time)\n df_stats = df.qc.describe()\n data_list.append(df_stats.values)\n else:\n measures = df_stats.index.to_list()\n variables = df.columns.to_list()\n\n attrs = resample_kwargs\n\n return xr.DataArray(np.dstack(data_list),\n coords = [measures, variables, time_list],\n dims = ['measure','variable','time'],\n name = \"qc_summary\",\n attrs = attrs)\n"},"repo_name":{"kind":"string","value":"wangsen992/pyqc"},"sub_path":{"kind":"string","value":"src/pyqc/tools.py"},"file_name":{"kind":"string","value":"tools.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":855,"string":"855"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":360,"cells":{"seq_id":{"kind":"string","value":"19504742337"},"text":{"kind":"string","value":"from igraph import Graph\nfrom igraph import plot\n\ngrafo = Graph(edges = [(0,1),(2,3),(0,2),(0,3)], directed = True)\ngrafo.vs['label'] =['Fernando', 'Pedro', 'Jose', 'Antonio']\ngrafo.vs['nota'] = [100, 40, 60, 20]\ngrafo.es['tipoAmizade'] = ['Amigo', 'Inimigo', 'Amigo']\ngrafo.es['devendo'] = [1,3,2,5]\n\ngrafo.vs['color'] = ['red', 'yellow','orange', 'green']\n\nplot(grafo, bbox =(300,300),\n vertex_size = grafo.vs['nota'],\n edge_width = grafo.es['devendo'],\n vertex_color = grafo.vs['color'],\n edge_curved = 0.4,\n vertex_shape = 
'square')\n"},"repo_name":{"kind":"string","value":"guibarreta1993Average/data_science_udemy"},"sub_path":{"kind":"string","value":"05_Grafos/aula34_impressao.py"},"file_name":{"kind":"string","value":"aula34_impressao.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":557,"string":"557"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":361,"cells":{"seq_id":{"kind":"string","value":"31148205537"},"text":{"kind":"string","value":"import argparse\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport pandas as pd\nimport numpy as np\nimport json\nimport os\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(prog='')\n parser.add_argument('json', type=str, help='Figure1 JSON.')\n parser.add_argument('-o', '--output_dir', default='.', help='')\n args = parser.parse_args()\n\n return(args)\n\ndef b(paths, outfile):\n\n dar_enrich = pd.read_csv(paths['figure6']['b']['dar_enrichment'], sep='\\t')\n\n fp_enrich = pd.read_csv(paths['figure6']['b']['footprint_enrichment'], sep='\\t')\n\n f, axes = plt.subplots(1,2, num='b', figsize=(12, 6))\n\n fp_logp = fp_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x))\n fp_logp = fp_logp.rename('footprint enrichments') \n\n dar_logp = dar_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x))\n dar_logp.sort_values(ascending=False, inplace=True)\n dar_logp = dar_logp.rename('top DAR enrichments') \n dar_logp = dar_logp[:10]\n\n sns.set_style(\"whitegrid\")\n\n sns.kdeplot(dar_logp, shade=True, color=\"#E74C3C\", ax=axes[0])\n sns.kdeplot(fp_logp, shade=True, color=\"#3498DB\", ax=axes[0])\n\n axes[0].set_xlabel('-log10 pval', fontsize=15)\n\n def label_point(x, y, val, ax):\n a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)\n for i, point in a.iterrows():\n ax.text(point['x']+.02, point['y'], str(point['val']), fontsize=10)\n\n def rand_jitter(arr, c):\n stdev = c*(max(arr)-min(arr))\n return arr + stdev\n\n fp_enrich['pval_enrichment'] = -1*np.log10(fp_enrich['pval_enrichment'])\n fp_enrich.sort_values('pval_enrichment', ascending=False, inplace=True)\n fp_enrich.reset_index(drop=True, inplace=True)\n\n sns.scatterplot(x=fp_enrich.index.tolist(), y='pval_enrichment', data=fp_enrich, ax=axes[1]) \n\n# label_point(pd.Series(fp_enrich.index.tolist()[:10]), fp_enrich['pval_enrichment'][:10], fp_enrich['name'][:10], axes[1])\n axes[1].set_xticks=''\n\n f.savefig(outfile, dpi=300)\n\ndef c(paths, outfile):\n\n fp_enrich = pd.read_csv(paths['figure6']['c'], sep='\\t')\n hic_hit = fp_enrich[fp_enrich['name']=='ZNF416-Zf']\n hic_df = pd.melt(hic_hit, id_vars=None, value_vars=['target_freq', 'bg_freq'], var_name='enrichment group', value_name='% total footprints')\n hic_df.sort_values('enrichment group', inplace=True)\n\n sns.set_style(\"whitegrid\")\n f, axes = plt.subplots(1,1, num='c', figsize=(12, 12))\n\n palette = ['#ABB2B9','#A569BD']\n sns.barplot(x='enrichment group', y='% total footprints', data=hic_df, palette=palette, ax=axes)\n\n axes.set_xlabel('', fontsize=15)\n axes.set_xticks = ''\n axes.set_xticklabels([]) \n\n axes.set_ylabel('')\n\n f.savefig(outfile, dpi=300)\n\ndef main():\n\n\n args = parse_args()\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n with open(args.json) as fp:\n paths = json.load(fp)\n\n 
bof = os.path.join(args.output_dir, 'Figure6b.png')\n cof = os.path.join(args.output_dir, 'Figure6c.png')\n\n b(paths, bof) \n c(paths, cof) \n\nif __name__ == '__main__':\n main()\n\n"},"repo_name":{"kind":"string","value":"perezja/Leukos"},"sub_path":{"kind":"string","value":"presentation/figure6/figure6.py"},"file_name":{"kind":"string","value":"figure6.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3116,"string":"3,116"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":362,"cells":{"seq_id":{"kind":"string","value":"5657507234"},"text":{"kind":"string","value":"import os\nfrom functools import reduce\n\n\nclass Photo:\n id = None\n layout = None # v or h\n tags = []\n\n def __init__(self, id, layout, tags):\n self.id = id\n self.layout = layout\n # self.tagalf = \"\".join(sorted(tags))\n self.tagalf = tuple(sorted(tags))\n self.tags = tags\n\n def __str__(self):\n return str(self.id) + \" - \" + \" \".join(self.tags)\n\n\nclass Slide:\n # 2 vertical or 1 horizontal\n photo_ids = []\n tags = []\n\n def __init__(self, photos):\n self.photo_ids = [str(photo.id) for photo in photos]\n self.tags = set(reduce(list.__add__, map(lambda x: list(x.tags), photos)))\n self.tags_sorted = tuple(sorted(list(self.tags)))\n\n def __str__(self):\n return \" \".join([str(x) for x in self.photo_ids]) + \" - \" + \" \".join([str(x) for x in self.tags])\n\n\nclass SlideShow:\n slides = []\n\n def __init__(self, slides=None):\n self.slides = [] if slides is None else slides\n\n def calculate_score(self):\n if len(self.slides) == 0:\n return 0\n score = 0\n for i, slide in enumerate(self.slides):\n score += self.interest_factor(i)\n return score\n\n def interest_factor(self, i):\n if i + 1 >= len(self.slides):\n return 0\n return interest_factor(self.slides[i], self.slides[i + 1])\n\n\ndef interest_factor(slide_1, slide_2):\n \"\"\" interest of slides\n Minimum between\n the number of common tags between Si and Si+1\n the number of tags in Si but not in Si+1\n the number of tags in Si+1 but not in Si\n \"\"\"\n common = set(slide_1.tags) & set(slide_2.tags)\n n_common = len(common)\n n_left = len(slide_1.tags) - len(set(slide_1.tags) & set(common))\n n_right = len(slide_2.tags) - len(set(common) & set(slide_2.tags))\n return min(n_common, n_left, n_right)\n\n\ndef n_common_tags(slide_1, slide_2):\n # return len(set(slide_1.tags) & set(slide_2.tags))\n return len(set(slide_1.tags).intersection(slide_2.tags))\n\n\ndef read_input(filepath):\n with open(filepath, 'r') as f:\n n = int(f.readline())\n i = 0\n result = []\n while i < n:\n line = f.readline()[:-1].split(\" \")\n result.append(Photo(i, line[0], line[2:]))\n i += 1\n return result\n\n\ndef write_output(slideshow, output_file):\n with open(output_file, \"w\") as f:\n f.write(str(len(slideshow.slides)) + \"\\n\")\n for slide in slideshow.slides:\n f.write(' '.join(slide.photo_ids) + \"\\n\")\n\n with open(output_file, 'rb+') as f:\n f.seek(-2, os.SEEK_END)\n f.truncate()\n\n\ndef get_slideshow(photos):\n slideshow = SlideShow()\n vert = None\n slides = []\n for photo in sorted(photos, key=lambda x: x.tagalf):\n if photo.layout == \"H\":\n slides.append(Slide([photo]))\n elif photo.layout == \"V\" and vert is None:\n vert = photo\n elif photo.layout == \"V\" and vert is not None:\n 
slides.append(Slide([photo, vert]))\n vert = None\n\n slides.sort(key=lambda x: x.tags_sorted)\n\n return SlideShow(slides)\n\n\ndef main():\n files = ['a_example.txt', 'b_lovely_landscapes.txt', 'c_memorable_moments.txt', 'd_pet_pictures.txt',\n 'e_shiny_selfies.txt']\n sum_score = 0\n for file in files:\n print(file)\n photos = read_input(file)\n slideshow = get_slideshow(photos)\n score = slideshow.calculate_score()\n sum_score += score\n print(\"SCORE: {}\\n\".format(score))\n write_output(slideshow, \"output/\" + file)\n print(\"END, {}\".format(sum_score))\n return None\n\n\nif __name__ == \"__main__\":\n main()\n"},"repo_name":{"kind":"string","value":"phyx4/hashcode_2019"},"sub_path":{"kind":"string","value":"main.py"},"file_name":{"kind":"string","value":"main.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3664,"string":"3,664"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":363,"cells":{"seq_id":{"kind":"string","value":"24931817284"},"text":{"kind":"string","value":"from json import dumps, loads\r\nfrom State import State\r\n\r\n\r\nclass Api:\r\n \"\"\"\r\n A class that provides methods for encoding and decoding\r\n States to and from JSON strings.\r\n\r\n Methods:\r\n - Encode(states: list[State]) -> str:\r\n Encodes a list of State objects to a JSON string.\r\n\r\n - Decode(jsonString: str) -> State:\r\n Decodes a JSON string to a State object.\r\n \"\"\"\r\n def Encode(states: list[State]) -> str:\r\n \"\"\"\r\n Encodes a list of State objects to a JSON string.\r\n\r\n Args:\r\n - states (list[State]):\r\n A list of State objects to encode.\r\n\r\n Returns:\r\n - str:\r\n A JSON string representing the list of State objects.\r\n \"\"\"\r\n return dumps([state.__dict__ for state in states])\r\n\r\n def Decode(jsonString: str) -> State:\r\n \"\"\"\r\n Decodes a JSON string to a State object.\r\n\r\n Args:\r\n - jsonString (str):\r\n A JSON string to decode.\r\n\r\n Returns:\r\n - State:\r\n A State object representing the decoded JSON string.\r\n \"\"\"\r\n obj = loads(jsonString)\r\n return State(\r\n obj['Board'],\r\n obj['Direction'],\r\n (obj['EmptyPoint']['X'], obj['EmptyPoint']['Y'])\r\n )\r\n"},"repo_name":{"kind":"string","value":"Saeed-Ayman/8-puzzle"},"sub_path":{"kind":"string","value":"API.py"},"file_name":{"kind":"string","value":"API.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1287,"string":"1,287"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":364,"cells":{"seq_id":{"kind":"string","value":"712141287"},"text":{"kind":"string","value":"#! 
/usr/bin/env python3\n# coding: utf-8\n\nimport os\nimport logging as lg\n\nimport pandas as pd\nimport numpy as np\n\n\nlg.basicConfig(level=lg.DEBUG)\n\n\nimport os\nimport pandas as pd\n\n\nclass SetOfParliamentMembers:\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"setOfParliamentMember: {} members\".format(len(self.dataframe))\n\n def data_from_csv(self, csv_file):\n self.dataframe = pd.read_csv(csv_file, sep=\";\", engine = 'python')\n\n def data_from_dataframe(self, dataframe):\n self.dataframe = dataframe\n\n def display_chart(self):\n # à venir, patience !\n pass\n\n def split_by_political_party(self):\n result = {}\n data = self.dataframe\n\n all_parties = data[\"parti_ratt_financier\"].dropna().unique()\n\n for party in all_parties:\n data_subset = data[data.parti_ratt_financier == party]\n subset = SetOfParliamentMembers('MPs from party \"{}\"'.format(party))\n subset.data_from_dataframe(data_subset)\n result[party] = subset\n\n return result\n\n\ndef launch_analysis(data_file, by_party=False, info=False):\n sopm = SetOfParliamentMembers(\"All MPs\")\n sopm.data_from_csv(os.path.join(\"data\", data_file))\n sopm.display_chart()\n\n if by_party:\n for party, s in sopm.split_by_political_party().items():\n s.display_chart()\n if info:\n print(sopm)\n\n\nif __name__ == \"__main__\":\n launch_analysis(\"current_mps.csv\")\n"},"repo_name":{"kind":"string","value":"honorezemagho/python-oc"},"sub_path":{"kind":"string","value":"analysis/csv.py"},"file_name":{"kind":"string","value":"csv.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1496,"string":"1,496"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":365,"cells":{"seq_id":{"kind":"string","value":"7276876468"},"text":{"kind":"string","value":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Animal(models.Model):\n \"\"\"Класс описывает объект Животное\"\"\"\n\n owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=\"Владелец\")\n species = models.CharField(max_length=30, verbose_name=\"Вид животного\")\n name = models.CharField(max_length=30, verbose_name=\"Кличка\")\n birth = models.DateField(verbose_name=\"Дата рождения\")\n breed = models.CharField(max_length=30, verbose_name=\"Порода\")\n gender = models.CharField(\n max_length=10, choices=[(\"Ж\", \"Женский\"), (\"М\", \"Мужской\")], verbose_name=\"Пол\"\n )\n\n class Meta:\n verbose_name = \"Животное\"\n verbose_name_plural = \"Животные\"\n\n def __str__(self):\n return self.name\n\n\nclass Vaccination(models.Model):\n \"\"\"Класс описывающий объект Вакцинация\"\"\"\n\n animal = models.ForeignKey(\n Animal, on_delete=models.CASCADE, verbose_name=\"Животное\"\n )\n date = models.DateField(verbose_name=\"Дата прививки\")\n vaccine = models.CharField(max_length=50, verbose_name=\"Вакцина\")\n\n class Meta:\n verbose_name = \"Вакцинация\"\n verbose_name_plural = \"Вакцинация\"\n\n def __str__(self):\n return f\"{self.date}\"\n\n\nclass Treatment(models.Model):\n \"\"\"Класс описывающий объект Обратока от паразитов\"\"\"\n\n animal = models.ForeignKey(\n Animal, on_delete=models.CASCADE, verbose_name=\"Животное\"\n )\n parasite_type = models.CharField(\n max_length=10,\n choices=[(\"Гельминты\", \"Гельминты\"), (\"Клещи\", 
\"Клещи\")],\n verbose_name=\"Вид паразитов\",\n )\n date = models.DateField(verbose_name=\"Дата обработки\")\n medication = models.CharField(max_length=50, verbose_name=\"Препарат\")\n dosage = models.CharField(max_length=10, verbose_name=\"Дозировка\")\n\n class Meta:\n verbose_name = \"Обработка от паразитов\"\n verbose_name_plural = \"Обработка от паразитов\"\n\n def __str__(self):\n return f\"{self.date}\"\n"},"repo_name":{"kind":"string","value":"Gamilkar/animal_medical_record"},"sub_path":{"kind":"string","value":"main/models.py"},"file_name":{"kind":"string","value":"models.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2320,"string":"2,320"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"ru"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":366,"cells":{"seq_id":{"kind":"string","value":"12510085973"},"text":{"kind":"string","value":"from tqdm import tqdm\nimport math\nimport time\nimport numpy as np\ndef bingliu_mpqa(utterance_tokenized, file):\n feat_ = []\n dict1_bing = {}\n for line in file:\n x = line.split(\"\\t\")\n dict1_bing[x[0] + \"_\" + x[1][:-1]] = 1\n i=0\n for tokens in utterance_tokenized:\n res = np.array([0,0,0,0])\n for token in tokens:\n pos = (token + \"_positive\")\n neg = (token + \"_negative\")\n if (pos in dict1_bing):\n res[0]+=1\n res[1]+=1\n elif (neg in dict1_bing):\n res[1]-=1\n if res[0]>0:\n res[2]=1\n if tokens!=[]:\n pos = tokens[-1] + \"_positive\"\n neg = tokens[-1] + \"_negative\"\n if pos in dict1_bing:\n res[3]=1\n elif neg in dict1_bing:\n res[3]=-1\n feat_.append(res)\n return np.array(feat_)\n\ndef SENT140(X):\n #sentiment140\n dict1_S140 = {}\n with open(\"lexicons/3. Sentiment140-Lexicon-v0.1/unigrams-pmilexicon.txt\", 'r') as fd:\n for line in fd:\n x = line.split(\"\t\")\n dict1_S140[x[0]] = float(x[1])\n \n feat_ = []\n for tokens in X:\n sent140 = [0,0,0,0]\n cnt = 0\n for token in tokens:\n if(\"#\" not in token):\n cnt += 1\n if(token in dict1_S140):\n sent140[0] += (dict1_S140[token] > 0)\n sent140[1] += dict1_S140[token]\n sent140[2] = max(sent140[2],dict1_S140[token])\n if(len(tokens) >= 1 and tokens[-1] in dict1_S140):\n \tsent140[3] = (dict1_S140[tokens[-1]] > 0)\n feat_.append(sent140)\n return np.array(feat_)\n# print()\ndef NRC_EMOTION(X):\n #NRC emotion\n dict1_NRC = {}\n cnt_r = 0\n len1 = 0;\n with open(\"lexicons/6. NRC-10-expanded.csv\", 'r') as fd:\n for line in fd:\n if(cnt_r == 0):\n cnt_r += 1\n continue;\n x = line.split(\"\t\")\n dict1_NRC[x[0]] = [float(i) for i in x[1:]]\n len1 = len(x[1:])\n feat_ = []\n for e,tokens in tqdm(enumerate(X)):\n emo_score = [[0,0,0,0] for i in range(len1)]\n cnt = 0\n for token in tokens:\n if(\"#\" in token):\n continue\n cnt += 1\n if(token in dict1_NRC):\n for i,val in enumerate(dict1_NRC[token]):\n \temo_score[i][0] += (val > 0)\n \temo_score[i][1] += val\n \temo_score[i][2] = max(emo_score[i][2],val)\n if(len(tokens) >= 1 and tokens[-1] in dict1_NRC):\n \tfor i,val in enumerate(dict1_NRC[token]):\n \t\temo_score[i][3] = (val > 0)\n \tres = []\n \tfor i in emo_score:\n \t\tres.extend(i)\n \tfeat_.append(res)\n return np.array(feat_)\n# print()\ndef NRC_HASHTAG_SENT(X):\n #NRC hashtag\n dict1_NRC = {}\n with open(\"lexicons/7. 
NRC-Hashtag-Sentiment-Lexicon-v0.1/unigrams-pmilexicon.txt\", 'r') as fd:\n for line in fd:\n x = line.split(\"\t\")\n dict1_NRC[x[0]] = float(x[1])\n feat_ = []\n for tokens in X:\n cnt = 0\n f = [0,0,0,0]\n for token in tokens:\n if(\"#\" not in token):\n continue\n cnt += 1\n if(token in dict1_NRC):\n \tf[0] += (dict1_NRC[token] > 0)\n \tf[1] += dict1_NRC[token]\n \tf[2] = max(f[2],dict1_NRC[token])\n \tif(len(tokens) >= 1 and tokens[-1] in dict1_NRC):\n \t\tf[3] = (dict1_NRC[tokens[-1]] > 0)\n feat_.append(f)\n return np.array(feat_)\n\ndef lexicons(utterance_tokenized):\n filebingliu = open(\"lexicons/1. BingLiu.csv\", \"r\")\n filempqa = open(\"lexicons/2. mpqa.txt\", \"r\")\n\n start = time.time()\n bingliu = bingliu_mpqa(utterance_tokenized, filebingliu)\n mpqa = bingliu_mpqa(utterance_tokenized, filempqa)\n sent140 = SENT140(utterance_tokenized)\n nrcemotion = NRC_EMOTION(utterance_tokenized)\n nrchashtag = NRC_HASHTAG_SENT(utterance_tokenized)\n end = time.time()\n print(\"time to calculate lexicons: \", end-start)\n\n # y = len(bingliu[0]) + len([mpqa[0]]) + len(sent140[0]) + len(nrcemotion[0]) + len(nrchashtag[0])\n feature = np.zeros([len(utterance_tokenized), 56])\n for i in range(len(utterance_tokenized)):\n feature[i] = np.concatenate((bingliu[i], mpqa[i], sent140[i], nrcemotion[i], nrchashtag[i]))\n return feature\n\nif __name__=='__main__':\n lexicons(utterance_tokenized)\n\n\n\n\n"},"repo_name":{"kind":"string","value":"hamzah70/Multi_Modal_Emotion_Analysis"},"sub_path":{"kind":"string","value":"lexiconFeatureVector.py"},"file_name":{"kind":"string","value":"lexiconFeatureVector.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":4491,"string":"4,491"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":367,"cells":{"seq_id":{"kind":"string","value":"38353405555"},"text":{"kind":"string","value":"import requests\nfrom bs4 import BeautifulSoup #screen-scraping library\n\n\n#request = requests.get(\"http://www.google.com\")\nrequest = requests.get(\"https://www.johnlewis.com/house-by-john-lewis-curve-dining-chair-white/p231441579\")\ncontent = request.content #getting content of the page\nsoup = BeautifulSoup(content, \"html.parser\")\nelement = soup.find(\"span\",{\"itemprop\":\"price\",\"class\":\"now-price\"}) #dictionary\n#print(element.text.strip())\nstring_price = element.text.strip() #\"#£19.00\"\n\nprice_without_symbol = string_price[1:]\n\nprice = (float(price_without_symbol))\n\nif price < 50:\n print(\"You should buy the chair!\")\n print(\"The current price is {}.\".format(string_price))\nelse:\n print(\"Don't buy the chair!!\")\n\n\n\n# £19.00 
\n\n#print(request.content)\n"},"repo_name":{"kind":"string","value":"BrayoKane/python-mongo"},"sub_path":{"kind":"string","value":"price-of-a-chair/src/app.py"},"file_name":{"kind":"string","value":"app.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":811,"string":"811"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":368,"cells":{"seq_id":{"kind":"string","value":"74022415547"},"text":{"kind":"string","value":"from rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom apps.celery_task.models import PeriodicTask\nfrom apps.celery_task.serializers.periodic_task_serializer import PeriodicTaskSerializer, CreatePeriodicTaskSerializer\nfrom packages.drf.pagination import CustomPageNumberPagination\nfrom packages.drf.renderers import CustomRenderer\nfrom packages.drf.viewsets import ModelViewSet\nfrom django_filters import FilterSet\n\n\nclass PeriodicTaskFilter(FilterSet):\n class Meta:\n model = PeriodicTask\n fields = {\"name\": [\"exact\"], \"creator\": [\"contains\"]}\n\n\nclass PeriodicTaskViewSet(ModelViewSet):\n permission_classes = [AllowAny]\n queryset = PeriodicTask.objects.all()\n serializer_class = PeriodicTaskSerializer\n pagination_class = CustomPageNumberPagination\n renderer_classes = (CustomRenderer,)\n filter_class = PeriodicTaskFilter\n ordering_fields = [\"id\"]\n ordering = [\"-id\"]\n\n def create(self, request, *args, **kwargs):\n serializer = CreatePeriodicTaskSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n name = serializer.validated_data[\"name\"]\n creator = \"test\"\n serializer.validated_data[\"name\"] = name\n serializer.validated_data[\"creator\"] = creator\n instance = serializer.save()\n instance.set_enabled(True)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n @action(methods=[\"post\"], detail=False)\n def create_task(self, request, *args, **kwargs):\n \"\"\"创建任务\n {\n \"name\": \"test\",\n \"cron\": {\"minute\":\"*/5\",\"hour\":\"*\",\"day_of_week\":\"*\",\"day_of_month\":\"*\",\"month_of_year\":\"*\"},\n }\n \"\"\"\n params = request.data\n cron_data = params.get(\"cron\")\n name = params.get(\"name\")\n creator = params.get(\"creator\", \"test\")\n periodic_task = PeriodicTask.objects.create_task(name, cron_data, creator)\n periodic_task.set_enabled(True)\n return Response({\"result\": \"创建成功\"})\n"},"repo_name":{"kind":"string","value":"yaowuya/django-major-core"},"sub_path":{"kind":"string","value":"apps/celery_task/views/periodic_task_view.py"},"file_name":{"kind":"string","value":"periodic_task_view.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2133,"string":"2,133"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":369,"cells":{"seq_id":{"kind":"string","value":"18959826347"},"text":{"kind":"string","value":"from rest_framework.decorators import api_view, permission_classes\nimport random\nimport string\nfrom pprint import pprint as 
pp\n\nimport requests\nfrom allauth.account.models import EmailAddress\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\n\nfrom points.views import new_user_point\nfrom .serializers import *\n\nUser = get_user_model()\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef kakao_login_and_get_userinfo(request):\n code = request.data.get('code')\n headers = {\n 'Content-type': 'application/x-www-form-urlencoded',\n }\n body = {\n 'grant_type': 'authorization_code',\n 'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140',\n 'redirect_uri': 'https://kickin.kr/loading/',\n 'code': code,\n }\n response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body)\n access_token = response.json().get('access_token')\n\n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-type': 'application/x-www-form-urlencoded;charset=utf-8',\n }\n\n info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers)\n info_res = info_request.json()\n\n nickname = info_res.get('properties').get('nickname')\n email = info_res.get('kakao_account').get('email')\n\n # 해당 이메일을 사용해 가입한 이력이 있는지, 확인한다.\n\n # 해당 이메일로 가입한 이력이 없다면, 새로운 유저를 생성한다.\n user = User.objects.filter(email=email)\n if not user:\n user = User.objects.create_user(email=email, password='Kakao_' + nickname + '977')\n user.login_type = 1\n user.save()\n\n # 카카오 로그인의 경우 별도의 이메일 인증을 생략\n EmailAddress.objects.create(user=user, email=email, verified=True, primary=True)\n\n # 해당 유저의 정보를 업데이트한다. : login_type = 1 (카카오 로그인)\n # user Info 생성\n user_info, user_created = UserInfo.objects.get_or_create(user=user)\n new_user_point(user.id) # 해당 유저의 포인트를 생성한다.\n\n # 소셜 로그인 정보는, 언제든 바뀔 수 았기 때문에 굳이 저장하지 않는다.\n kakao_profile = info_res.get('kakao_account').get('profile').get('profile_image_url')\n kakao_nickname = info_res.get('properties').get('nickname')\n\n # 로그인 응답 데이터 생성\n response_data = {\n 'kakao_profile': kakao_profile,\n 'kakao_nickname': kakao_nickname,\n 'kakao_email': email, # 로그인 처리를 위해 응답 데이터에 이메일을 포함시킨다. / 비밀번호는 패턴화 되어있다. 
(Kakao_ + nickname + 977)\n }\n\n\n return Response(data=response_data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef kakao_test(request):\n code = request.data.get('code')\n headers = {\n 'Content-type': 'application/x-www-form-urlencoded',\n }\n body = {\n 'grant_type': 'authorization_code',\n 'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140',\n 'redirect_uri': 'http://localhost:8080/loading/',\n 'code': code,\n }\n response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body)\n pp(response.json())\n access_token = response.json().get('access_token')\n\n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-type': 'application/x-www-form-urlencoded;charset=utf-8',\n }\n\n info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers)\n info_res = info_request.json()\n\n pp(info_res)\n\n return Response(data=info_res, status=status.HTTP_200_OK)\n"},"repo_name":{"kind":"string","value":"isaacShin-dev/kickin"},"sub_path":{"kind":"string","value":"accounts/social_views.py"},"file_name":{"kind":"string","value":"social_views.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3846,"string":"3,846"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"ko"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":370,"cells":{"seq_id":{"kind":"string","value":"20093575148"},"text":{"kind":"string","value":"# General\nimport os\n\n# Tools/utils\nimport itertools\nimport multiprocessing\nfrom tqdm.notebook import tqdm\nfrom tqdm import tqdm as tqdm_cli\nfrom functools import reduce # for aggregate functions\nfrom itertools import chain # for aggregate functions\n\n# Data management\nimport math\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport igraph as ig\nimport leidenalg as la\nfrom community import community_louvain\n\n# Visualization\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pygraphviz as pgv\nimport colorcet as cc\nfrom matplotlib.colors import ListedColormap\nfrom wordcloud import WordCloud, STOPWORDS\nfrom termcolor import colored # colored text output\n\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nstopwords = STOPWORDS.union({\n 'regulation', 'activity', 'positive', 'negative', 'catabolic', 'process', 'protein', 'complex', \n 'binding', 'response', 'gene', 'genes', 'encoding', 'defining', 'GeneID', 'regulated',\n})\n\n \ndef get_tf_targ_ctx(df):\n tf_target_dict = {'TF': [], 'target': [], 'importance': []}\n tf_target_info = (\n df.droplevel(axis=0, level=1).droplevel(axis=1, level=0)['TargetGenes']\n .map(set) # transform each list into set\n .groupby('TF').agg(lambda x: reduce(lambda a, b: a.union(b), x)) # combine all targets per TF\n )\n for tf, target_info in tf_target_info.iteritems():\n tf_target_dict['TF'] += [tf for target_name, score in target_info]\n tf_target_dict['target'] += [target_name for target_name, score in target_info]\n tf_target_dict['importance'] += [score for target_name, score in target_info]\n return pd.DataFrame(tf_target_dict)\n\n\ndef netgraph_community_layout(G, node_to_community, community_scale=1., node_scale=2., seed=42):\n \"\"\"\n Compute the node positions for a modular graph.\n \"\"\"\n\n # assert that there multiple communities in the graph; otherwise abort\n communities = 
set(node_to_community.values())\n if len(communities) < 2:\n warnings.warn(\"Graph contains a single community. Unable to compute a community layout. Computing spring layout instead.\")\n return nx.spring_layout(G, weight='importance', **kwargs)\n\n community_size = _get_community_sizes(node_to_community)\n community_centroids = _get_community_positions(G, node_to_community, community_scale, seed=seed)\n relative_node_positions = _get_node_positions(G, node_to_community, node_scale, seed=seed)\n\n # combine positions\n node_positions = dict()\n for node, community in node_to_community.items():\n xy = community_centroids[node]\n delta = relative_node_positions[node] * community_size[community]\n node_positions[node] = xy + delta\n\n return node_positions\n\n\ndef _get_community_sizes(node_to_community):\n \"\"\"\n Compute the area of the canvas reserved for each community.\n \"\"\"\n \n def _invert_dict(mydict):\n \"\"\"Invert a dictionary such that values map to keys.\"\"\"\n inverse = dict()\n for key, value in mydict.items():\n inverse.setdefault(value, set()).add(key)\n return inverse\n \n scale = (1, 1)\n \n total_nodes = len(node_to_community)\n max_radius = np.linalg.norm(scale) / 2\n scalar = max_radius / total_nodes\n community_to_nodes = _invert_dict(node_to_community)\n community_size = {community : len(nodes) * scalar for community, nodes in community_to_nodes.items()}\n \n return community_size\n\n\ndef _get_community_positions(G, node_to_community, community_scale, seed, simple=True):\n \"\"\"\n Compute a centroid position for each community.\n \"\"\"\n \n # create a weighted graph, in which each node corresponds to a community,\n # and each edge weight to the number of edges between communities\n between_community_edges = _find_between_community_edges(G, node_to_community)\n\n communities = set(node_to_community.values())\n hypergraph = nx.DiGraph()\n hypergraph.add_nodes_from(communities)\n \n if not simple: \n for (ci, cj), edges in between_community_edges.items():\n hypergraph.add_edge(ci, cj, weight=len(edges))\n\n # find layout for communities\n pos_communities = nx.spring_layout(hypergraph, scale=community_scale, seed=seed)\n\n # set node positions to position of community\n pos = dict()\n for node, community in node_to_community.items():\n pos[node] = pos_communities[community]\n\n return pos\n\ndef _find_between_community_edges(G, node_to_community, fixed_community=None):\n \"\"\"Convert the graph into a weighted network of communities.\"\"\"\n edges = dict()\n\n for (ni, nj) in G.edges():\n ci = node_to_community[ni]\n cj = node_to_community[nj]\n \n if fixed_community is not None:\n if fixed_community != ci and fixed_community != cj:\n continue\n\n if ci != cj:\n try:\n edges[(ci, cj)] += [(ni, nj)]\n except KeyError:\n edges[(ci, cj)] = [(ni, nj)]\n\n return edges\n\ndef _get_node_positions(G, node_to_community, node_scale, seed):\n \"\"\"\n Positions nodes within communities.\n \"\"\"\n communities = dict()\n for node, community in node_to_community.items():\n try:\n communities[community] += [node]\n except KeyError:\n communities[community] = [node]\n\n pos = dict()\n for ci, nodes in communities.items():\n subgraph = G.subgraph(nodes)\n pos_subgraph = nx.spring_layout(subgraph, weight='importance', scale=node_scale, seed=seed)\n pos.update(pos_subgraph)\n\n return pos\n\ndef squeeze_graph(G, partition, approximate_size=4000):\n \"\"\"\n Squeeze graph by picking only top nodes (according to number of connections) in each partition. 
This\n step is needed to speed up the networkx visualization and show only the general POV on the graph.\n \"\"\"\n \n #### STEP 1 - filtering nodes\n \n # Getting the number of partitions\n num_partitions = len(set(partition.values()))\n \n # Getting partition parameters\n partition_sizes = {i: len([1 for node, k in partition.items() if k == i]) for i in range(num_partitions)}\n min_partition_size = min(partition_sizes.values())\n \n # Normalizing partition size: divide each partition size by the minimal partition size\n normalized_partition_size = {i: (size // min_partition_size) for i, size in partition_sizes.items()}\n \n # Getting scale factor - to get approximately size of the graph close to approximate_size\n scale_factor = math.ceil(approximate_size / sum(normalized_partition_size.values()))\n squeezed_partition = {i: (size * scale_factor) for i, size in normalized_partition_size.items()}\n \n top_nodes = []\n for i, num_nodes in squeezed_partition.items():\n # Getting partition graph\n partition_i = G.subgraph([node for node, k in partition.items() if k == i])\n \n # Finding inter-community edges\n intercommunity_edges = _find_between_community_edges(G, partition, i)\n \n # Calculating node importance according to number of inter-community edges\n node_importance = {}\n for (part_1, part_2), edges in intercommunity_edges.items():\n for node_1, node_2 in edges:\n curr_node = node_1 if part_1 == i else node_2\n if curr_node in node_importance:\n node_importance[curr_node] += 1\n else:\n node_importance[curr_node] = 1\n \n # Getting top nodes in the partition according to maximum number of inter-community edge (node_importance)\n top_nodes += list(dict(sorted(node_importance.items(), key=lambda x: x[1], reverse=True)[:squeezed_partition[i]]).keys())\n \n filtered_partition = {node: i for node, i in partition.items() if node in top_nodes}\n filtered_G = G.subgraph(top_nodes)\n \n #### STEP 2 - filtering edges\n \n # Setting up the size of the squeezed graph (number of edges)\n keep_num_edges = 20000\n edges_to_keep = \\\n list(\n dict(\n sorted(\n {\n (st, end): data['importance'] for st, end, data in filtered_G.edges(data=True)\n }.items(), key=lambda x: x[1], reverse=True)[:keep_num_edges]\n ).keys()\n )\n squeezed_G = filtered_G.edge_subgraph(edges_to_keep)\n squeezed_partition = {node: i for node, i in filtered_partition.items() if node in squeezed_G.nodes()}\n \n return squeezed_G, squeezed_partition\n\n\ndef get_elipsis_mask():\n h, w = 600, 800\n center = (int(w/2), int(h/2))\n radius_x = w // 2\n radius_y = h // 2\n\n Y, X = np.ogrid[:h, :w]\n mask = ((X - center[0])**2/radius_x**2 + (Y - center[1])**2/radius_y**2 >= 1)*255\n\n return mask\n\n\ndef plot_cloud(G, partition, squeezed_pos, ax, anno_db, filter_genes=True, \n limit_anno_until=50, display_func=False, if_betweenness=True, \n k=3000): \n \"\"\"\n Plot word cloud that indicates the function(s) of each gene cluster.\n \"\"\"\n \n # Loading the gene functional annotation\n gene_func = load_gene_func_db(anno_db, reload=False, as_series=True)\n \n # Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...}\n partition_genes_ = {}\n for gene, i in partition.items():\n if i not in partition_genes_.keys():\n partition_genes_[i] = [gene]\n else:\n partition_genes_[i] += [gene]\n \n # If display gene function in the word clouds\n if display_func:\n \n # Whether to filter the genes on which we compute the word cloud (most important genes)\n if filter_genes:\n compute_centrality = 
nx.betweenness_centrality if if_betweenness else nx.closeness_centrality\n distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}\n partition_genes = {}\n t = tqdm(partition_genes_.items())\n for i, genes in t:\n t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')\n top_len = min(limit_anno_until, len(genes))\n top_gene_scores = dict(\n sorted(\n compute_centrality(\n G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric\n ).items(), \n key=lambda x: x[1], reverse=True\n )[:top_len]\n )\n # Renormalizing centrality scores between 1 and 100, and rounding them to use later when \n # displaying wordclouds (higher score - higher \"frequency\" or word size)\n norm_top_gene_scores = dict(\n zip(\n top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100)))\n )\n )\n partition_genes[i] = norm_top_gene_scores\n print('Filtered genes for generating the function word cloud..')\n else:\n partition_genes = {{gene_: 1 for gene_ in gene_list} for i, gene_list in partition_genes_.items()}\n \n # Computing functional annotation for each cluster as a concatenated list of annotations\n # Each annotation is weighted by its duplication gene_score times (e.g. a gene has score = 2 -> \n # the functional annotation is duplicated and have bigger font in WordCloud)\n partition_funcs = {\n i: ' '.join(\n chain.from_iterable([\n gene_func[gene_func.index == gene].to_list()*gene_score \n for gene, gene_score in gene_score_list.items()\n ])) for i, gene_score_list in partition_genes.items()\n }\n\n # Generating word counts from aggregated gene annotation texts -> obtaining main (most frequent) function tokens\n word_counts = {i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in partition_funcs.items()}\n word_counts = {\n i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items()\n } # dealing with no word case\n wordclouds = {\n i: WordCloud(\n max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask()\n ).generate_from_frequencies(freqs) for i, freqs in word_counts.items()\n }\n \n # Display main genes in decreasing order of importance (top `top_len` genes)\n else:\n \n compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality\n distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}\n partition_genes = {}\n t = tqdm(partition_genes_.items())\n for i, genes in t:\n t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')\n top_len = min(limit_anno_until, len(genes))\n top_gene_scores = dict(\n sorted(\n compute_centrality(\n G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric\n ).items(), \n key=lambda x: x[1], reverse=True\n )[:top_len]\n )\n # Renormalizing centrality scores between 1 and 100, and rounding them to use later when \n # displaying wordclouds (higher score - higher \"frequency\" or word size)\n norm_top_gene_scores = dict(\n zip(\n top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100)))\n )\n )\n partition_genes[i] = norm_top_gene_scores\n print('Obtained top genes for generating the gene word cloud..')\n \n wordclouds = {\n i: WordCloud(\n max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask()\n ).generate_from_frequencies(gene_score_dict) for i, gene_score_dict in 
partition_genes.items()\n }\n \n \n # Plotting\n partition_coords = {}\n for gene, coords in squeezed_pos.items():\n if partition[gene] not in partition_coords:\n partition_coords[partition[gene]] = [coords]\n else:\n partition_coords[partition[gene]] += [coords]\n for i, coords in partition_coords.items():\n x, y = zip(*coords)\n min_x, max_x = min(x), max(x)\n min_y, max_y = min(y), max(y)\n ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y])\n \n return ax\n\n \ndef process_communities(data, pat=None, algo='leiden', filter_quantile=0.95, if_betweenness=True, \n limit_anno_until=50, k=5000, save_top_intercommunity_links_until=20, \n other_functions_until=20, save_top_new_found_cluster_links=20, seed=42):\n \"\"\"\n Process graph by finding its communities, annotate its communities, and save everything into .tsv format.\n \"\"\"\n \n from joblib import Parallel, delayed\n \n def highlight_TFs(word, font_size, position, orientation, font_path, random_state):\n TF_color = (255, 0, 0) # red\n if word in lambert_TF_names or word in dorothea_TF_names:\n return TF_color\n else:\n r, g, b, alpha = plt.get_cmap('viridis')(font_size / 120)\n return (int(r * 255), int(g * 255), int(b * 255))\n \n print('\\nPerforming community analysis..\\n\\n')\n \n # Setting pathways to files\n _PROJ_PATH = '/gpfs/projects/bsc08/bsc08890'\n _FMETA = os.path.join(_PROJ_PATH, 'data/GSE145926_RAW/metadata.tsv')\n _DATA_HOME = os.path.join(_PROJ_PATH, 'res/covid_19')\n\n # Loading sample meta data, reordering patients\n full_meta = pd.read_csv(_FMETA, sep='\\t', index_col=0)\n \n # Prepare everything to save the figs and dataframe\n if data == 'all_data':\n data = 'raw_data'\n elif 'raw_data_' not in data:\n data = f'raw_data_{data}'\n else:\n pass\n \n if pat is None or pat == 'all_data':\n \n # Cell-type aggregated data\n data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '')\n \n figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2', f'raw_data')\n \n data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities')\n data_as = os.path.join(data_to, f'raw_data_communities_info.pickle')\n \n elif pat in ['C', 'M', 'S']:\n \n # Patient-type aggregated data\n data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '')\n \n figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2', \n f'raw_data_{pat}_type')\n \n data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities')\n data_as = os.path.join(data_to, f'raw_data_{pat}_type_communities_info.pickle')\n \n else:\n \n # Loading patient-specific data\n figs_as = os.path.join(_DATA_HOME, pat, 'figs', 'grnboost2', f'{data}')\n \n data_to = os.path.join(_DATA_HOME, pat, 'data', 'grnboost2', f'{algo}_communities')\n data_as = os.path.join(data_to, f'{data}_communities_info.pickle')\n \n os.makedirs(data_to, exist_ok=True)\n os.makedirs(os.path.dirname(figs_as), exist_ok=True)\n \n # Loading lists of TFs from Lambert 2018 and DoRothEA, in the latter case we will keep only confident regulons\n lambert_TF_names = pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/lambert2018.txt'), header=None)[0].to_list()\n dorothea_TF_names = list(\n pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/dorothea_regulons.tsv'), sep='\\t') \\\n .loc[lambda x: x['confidence'].isin(['A', 'B', 'C'])]['tf'].unique()\n )\n \n # Loading the graph\n G = get_nx_graph(data=data, 
data_type='all', pat=pat, get_filtered=filter_quantile)\n print(f\"Loaded the graph: {colored('pat', 'green')}='{colored(pat, 'red')}', \"\n f\"{colored('data', 'green')}='{colored(data, 'red')}', \"\n f\"{colored('data_type', 'green')}='{colored('all', 'red')}'\\n\")\n \n \n ###### FINDING COMMUNITIES IN THE GRAPH #######\n \n print('Finding communities in the graph..')\n \n if algo == 'louvain':\n partition = community_louvain.best_partition(G.to_undirected(), weight='importance', random_state=seed)\n elif algo == 'leiden':\n G_igraph = ig.Graph.from_networkx(G.to_undirected())\n la_partition = la.find_partition(G_igraph, la.ModularityVertexPartition, weights='importance', seed=seed)\n partition = {G_igraph.vs[node]['_nx_name']: i for i, cluster_nodes in enumerate(la_partition) for node in cluster_nodes}\n else:\n raise NotImplementedError\n \n num_partitions = len(set(partition.values()))\n print(f'Number of partitions using {algo} algorithm: {colored(num_partitions, \"cyan\")}\\n')\n \n \n ###### FINDING HIGH-CENTRALITY GENES IN THE WHOLE GRAPH\n \n print('Finding high-centrality genes in the whole graph..')\n \n num_workers = max(multiprocessing.cpu_count() // 2, 1)\n whole_G_central_genes = dict(\n sorted(betweenness_centrality_parallel(G, processes=num_workers).items(), key=lambda x: x[1], reverse=True)[:limit_anno_until]\n )\n print(f'Computed the {\"betweenness\" if if_betweenness else \"closeness\"} centrality for all genes in the graph\\n')\n \n ###### FINDING HIGH-CENTRALITY GENES AND CORRESPONDING FUNCTIONS IN EACH COMMUNITY USING GO ANNOTATION ######\n \n print('Finding high-centrality genes/functions in each cluster..')\n \n # Loading the gene functional annotation\n anno_db_tags = ['GO', 'KEGG', 'immunological', 'hallmark']\n gene_func_dbs = {tag: load_gene_func_db(tag, as_series=True) for tag in anno_db_tags}\n \n # Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...}\n partition_genes_ = {}\n for gene, i in partition.items():\n if i not in partition_genes_.keys():\n partition_genes_[i] = [gene]\n else:\n partition_genes_[i] += [gene]\n\n # Whether to filter the genes on which we compute the word cloud (most important genes)\n compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality\n distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'}\n all_partition_genes = {}\n norm_partition_genes = {}\n t = tqdm_cli(partition_genes_.items(), ascii=True)\n for i, genes in t:\n t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}')\n gene_scores = dict(\n sorted(\n compute_centrality(\n G.subgraph(genes), k=min(G.subgraph(genes).order(), k), normalized=True, **distance_metric\n ).items(), \n key=lambda x: x[1], reverse=True\n )\n )\n all_partition_genes[i] = gene_scores\n central_gene_scores = {gene: gene_scores[gene] for k, gene in enumerate(gene_scores.keys()) if k < limit_anno_until}\n \n # Renormalizing centrality scores between 1 and 100, and rounding them to use later when \n # displaying wordclouds (higher score - higher \"frequency\" or word size)\n norm_partition_genes[i] = dict(\n zip(\n central_gene_scores.keys(), \n list(map(lambda x: int(x), scale(list(central_gene_scores.values()), 1, 100)))\n )\n )\n print('Computed centrality scores for each gene in each community\\n')\n \n print('Finding functional annotations for each cluster..')\n \n # Computing functional annotation for each cluster as a concatenated list of annotations\n # 
Each annotation is weighted by its duplication gene_score times (e.g. a gene has score = 2 -> \n # the functional annotation is duplicated and have bigger font in WordCloud)\n # We also do it for different functional annotations like GO, KEGG, Hallmark, etc..\n partition_funcs = {\n tag: \n {\n i: ' '.join(\n chain.from_iterable([\n gene_func[gene_func.index == gene].to_list()*gene_score \n for gene, gene_score in gene_score_list.items()\n ])) for i, gene_score_list in norm_partition_genes.items()\n } for tag, gene_func in gene_func_dbs.items()\n }\n \n print('Computed functional annotations for each cluster\\n')\n\n \n ###### PLOTTING GENE AND FUNC COMMUNITY CLOUDS ######\n \n print('Plotting clusters..')\n \n # Getting positions of squeezed graph - we do not plot every gene on the figure\n squeezed_G, squeezed_partition = squeeze_graph(G, partition)\n print('Computed a squeezed graph representation..')\n \n squeezed_pos = netgraph_community_layout(squeezed_G, squeezed_partition, seed=seed) # nx.nx_agraph.pygraphviz_layout(G.to_undirected(), prog=\"sfdp\") # nx.nx.spring_layout(G, seed=seed, k=0.2, iterations=20)\n partition_coords = {}\n for gene, coords in squeezed_pos.items():\n if partition[gene] not in partition_coords:\n partition_coords[partition[gene]] = [coords]\n else:\n partition_coords[partition[gene]] += [coords]\n print('Computed node positions of the squeezed graph representation..')\n \n cmap = ListedColormap(sns.color_palette(cc.glasbey_bw, n_colors=num_partitions).as_hex())\n \n for plot_type in ['genes'] + list(map(lambda x: f\"func_{x}\", anno_db_tags)):\n \n if plot_type.startswith('func'):\n # Getting current functional annotation\n curr_partition_funcs = partition_funcs[plot_type[plot_type.find('_') + 1:]]\n \n f, ax = plt.subplots(figsize=(20, 35))\n \n if plot_type == 'genes':\n wordclouds = {\n i: WordCloud(\n max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask()\n ).generate_from_frequencies(gene_score_dict).recolor(color_func=highlight_TFs) \n for i, gene_score_dict in norm_partition_genes.items()\n }\n else:\n word_counts = {\n i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in curr_partition_funcs.items()\n }\n word_counts = {\n i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items()\n } # dealing with no word case\n wordclouds = {\n i: WordCloud(\n max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask()\n ).generate_from_frequencies(freqs) for i, freqs in word_counts.items()\n }\n \n # Plotting clouds\n for i, coords in partition_coords.items():\n x, y = zip(*coords)\n min_x, max_x = min(x), max(x)\n min_y, max_y = min(y), max(y)\n ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y])\n print(f'Finished plotting {plot_type} word cloud..')\n \n nx.draw(squeezed_G, squeezed_pos, ax=ax, arrowstyle=\"->\", arrowsize=20, \n connectionstyle=f'arc3, rad = 0.25', edge_color='gray', width=0.4, \n node_color='k', node_size=50, alpha=0.02)\n nx.draw_networkx_nodes(squeezed_G, squeezed_pos, ax=ax, node_size=100, \n nodelist=list(squeezed_partition.keys()), \n node_color=list(squeezed_partition.values()), \n cmap=cmap, alpha=0.005)\n print(f'Finished plotting {plot_type} nodes..')\n\n ax.set_title(f'Found communities ({pat}, \"all\", {data}), '\n f'annotation - {plot_type}', \n fontsize=30)\n plt.axis('off')\n\n plt.savefig(f'{figs_as}_{plot_type}.png', bbox_inches='tight', dpi=400)\n 
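# at this point one PNG has been written per word-cloud type: the gene clouds plus one figure for each functional annotation source (GO, KEGG, immunological, hallmark)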
\n print('Finished plotting..\\n')\n \n \n ###### SAVING DATAFRAME CONTAINING INFORMATION ABOUT EACH COMMUNITY ######\n\n def compute_community_info(i):\n \"\"\"\n Parallel saving of the dataframe.\n \"\"\"\n\n # Getting information for each community\n genes = list(all_partition_genes[i].keys())\n community_subgraph = G.subgraph(genes)\n\n communities_i = pd.Series(dtype='object')\n\n # Setting tqdm logs\n # t.set_description(f'Saving info about {i} cluster, size={community_subgraph.order()}')\n\n # Getting information about cluster genes\n central_genes_and_scores = {\n gene: all_partition_genes[i][gene] for k, gene in enumerate(genes) if k < limit_anno_until\n }\n\n non_lambert_TFs = [\n f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in lambert_TF_names\n ]\n non_dorothea_TFs = [\n f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in dorothea_TF_names\n ]\n\n # Filling dataframe with the information\n communities_i['num_nodes'] = community_subgraph.number_of_nodes()\n communities_i['num_edges'] = community_subgraph.number_of_edges()\n communities_i['all_sorted_genes'] = '; '.join(\n f'{gene} (score={score})' for gene, score in all_partition_genes[i].items()\n )\n communities_i['sorted_central_genes_scores'] = '; '.join(\n f'{gene} (score={score:.2f})' for gene, score in central_genes_and_scores.items()\n )\n communities_i['non_lambert_2018_TF_central_genes'] = '; '.join(non_lambert_TFs)\n communities_i['non_dorothea_TF_central_genes'] = '; '.join(non_dorothea_TFs)\n communities_i['whole_G_central_genes_scores'] = '; '.join(\n f'{gene} (score={score:.2f})' for gene, score in whole_G_central_genes.items()\n )\n\n # Filling information about newly found gene-gene links (based on absence in KEGG and Hallmark)\n top_cluster_links = set()\n\n iter_i = 0\n\n for st, end, edge_info in sorted(community_subgraph.edges(data=True), \n key=lambda t: t[2]['importance'], \n reverse=True):\n\n # If the current (reverse directed) link was not encountered previously..\n if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in top_cluster_links]:\n top_cluster_links.add((st, end, edge_info['importance']))\n iter_i += 1\n if iter_i == save_top_new_found_cluster_links:\n break\n\n for anno_tag in ['KEGG', 'hallmark']:\n\n curr_db = load_gene_func_db(anno_tag)\n tmp_list = []\n\n # if `st` gene and `end` gene have non-overlapping annotations..\n for st, end, imp in top_cluster_links:\n st_anno_IDs = set(curr_db[curr_db.index == st]['ID'])\n end_anno_IDs = set(curr_db[curr_db.index == end]['ID'])\n if len(st_anno_IDs.intersection(end_anno_IDs)) == 0 and \\\n (len(st_anno_IDs) != 0 or len(end_anno_IDs) != 0):\n tmp_list.append(f\"{st} ({' & '.join(st_anno_IDs)}) <-> {end} ({' & '.join(end_anno_IDs)})\")\n\n communities_i[f'new_gene_gene_links_{anno_tag}'] = '; '.join(tmp_list)\n\n # Filling information about cluster functions\n for tag, gene_func in gene_func_dbs.items():\n\n curr_partition_funcs = partition_funcs[tag]\n\n # Filling main functions - non duplicates at the top \n main_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order\n func for gene in central_genes_and_scores.keys() \n for func in gene_func[gene_func.index == gene].to_list()\n ]))\n gene_with_main_functions = [\n ','.join(\n gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list()\n ) for func in main_functions\n ]\n main_functions = [\n f'>>> {func} <<<: {gene}' for gene, func in 
zip(gene_with_main_functions, main_functions)\n ]\n communities_i[f'main_functions_{tag}'] = '; '.join(main_functions) # saving..\n\n # Saving functions corresponding to each gene\n central_functions_per_gene = [\n f\">>> {gene} <<<: {' & '.join(gene_func[gene_func.index == gene].to_list())}\" for gene in central_genes_and_scores.keys()\n ]\n communities_i[f'sorted_central_functions_{tag}'] = '; '.join(central_functions_per_gene) # saving..\n\n # Saving most frequent function words\n freq_words = WordCloud(\n max_words=30, min_font_size=15, stopwords=stopwords\n ).process_text(curr_partition_funcs[i])\n freq_words = dict(\n sorted(freq_words.items(), key=lambda x: x[1], reverse=True)\n ) if freq_words else {'no found function': 1} # dealing with no word case\n communities_i[f'most_frequent_function_words_{tag}'] = '; '.join(freq_words.keys()) # saving\n\n # Saving other functions present in this cluster\n other_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order\n func for gene in genes if gene not in central_genes_and_scores.keys() \n for func in gene_func[gene_func.index == gene].to_list() if func not in main_functions\n ]))[:other_functions_until]\n genes_with_other_functions = [\n ','.join(\n gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list()\n ) for func in other_functions\n ]\n other_functions = [\n f'>>> {func} <<<: {gene}' for gene, func in zip(genes_with_other_functions, other_functions)\n ]\n communities_i[f'other_functions_{tag}'] = '; '.join(other_functions) # saving\n\n # Filling information about top inter-community links\n # t_sub = tqdm(range(num_partitions), ascii=True, leave=False)\n for k in range(num_partitions): # t_sub:\n # t_sub.set_description(f'Extracting top inter-community links with {k}')\n\n if i != k:\n genes_in_k = list(all_partition_genes[k].keys())\n\n # Getting the subgraph that contains central genes in community_i and all genes in comunity_k\n G_central_i_k = G.subgraph(list(central_genes_and_scores.keys()) + genes_in_k)\n # Getting the subgraph that contains all genes from community_i and community_k\n G_i_k = G.subgraph(genes + genes_in_k)\n\n # Creating two helper sets that allow us to keep only unique links\n links_central_i_k = set()\n links_i_k = set()\n\n iter_i = 0\n\n # Getting out top links from the second subgraph\n for st, end, edge_info in sorted(G_central_i_k.edges(data=True), \n key=lambda t: t[2]['importance'], \n reverse=True):\n # If the current (reverse directed) link was not encountered previously..\n if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_central_i_k] and \\\n ((st in genes and end not in genes) or (end in genes and st in genes)):\n links_central_i_k.add((st, end, edge_info['importance']))\n iter_i += 1\n if iter_i == save_top_intercommunity_links_until:\n break\n\n iter_i = 0\n\n # Getting out top links from the second subgraph\n for st, end, edge_info in sorted(G_i_k.edges(data=True), \n key=lambda t: t[2]['importance'], \n reverse=True):\n # If the current (reverse directed) link was not encountered previously..\n if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_i_k] and \\\n ((st in genes and end not in genes) or (end in genes and st in genes)):\n links_i_k.add((st, end, edge_info['importance']))\n iter_i += 1\n if iter_i == save_top_intercommunity_links_until:\n break\n\n # Adding top links to the dataframe\n communities_i[f'top_links_scores_central_genes<->community_{k}'] = \\\n '; '.join(f'{st} <-> {end} 
(score={score:.2f})' for st, end, score in links_central_i_k)\n communities_i[f'top_links_scores_with_community_{k}'] = \\\n '; '.join([f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_i_k])\n\n return communities_i\n \n print('Saving info dataframe..')\n \n t = tqdm_cli(range(num_partitions), ascii=True)\n \n # Getting dataframe\n result = Parallel(n_jobs=num_workers)(delayed(compute_community_info)(i) for i in t)\n communities_df = pd.concat(result, axis=1).T.reindex(\n columns=[\n 'num_nodes', 'num_edges',\n 'main_functions_GO', 'main_functions_KEGG', 'main_functions_immunological', 'main_functions_hallmark', \n 'non_lambert_2018_TF_central_genes', 'non_dorothea_TF_central_genes', \n 'new_gene_gene_links_KEGG', 'new_gene_gene_links_hallmark',\n 'whole_G_central_genes_scores',\n 'other_functions_GO', 'other_functions_KEGG', 'other_functions_immunological', 'other_functions_hallmark',\n 'sorted_central_genes_scores',\n 'sorted_central_functions_GO', 'sorted_central_functions_KEGG', 'sorted_central_functions_immunological', 'sorted_central_functions_hallmark', \n 'most_frequent_function_words_GO', 'most_frequent_function_words_KEGG', 'most_frequent_function_words_immunological', 'most_frequent_function_words_hallmark',\n 'all_sorted_genes'] + \n [f'top_links_scores_central_genes<->community_{i}' for i in range(num_partitions)] + \n [f'top_links_scores_with_community_{i}' for i in range(num_partitions)\n ]\n )\n \n # Saving dataframe\n communities_df.to_pickle(data_as)\n print(f\"Saved the data to {data_as}!\\n\")\n \n \ndef run_enrichr(data, is_communities=False, is_positive_markers=True, group_types = 'all', on_targets=False, choose_fixed_tf=None,\n data_type='all', top_n=50, algo='leiden', enrichr_library='MSigDB_Hallmark_2020'):\n \"\"\"\n Run enrichment analysis with Enrichr.\n \"\"\"\n \n import json\n import requests\n import sys\n import io \n \n out_folder = 'community_ana' if is_communities else 'cohort_ana'\n \n if is_communities == True:\n \n print('Running EnrichR on communities..')\n \n algo = 'leiden'\n _DATA_HOME = '/gpfs/projects/bsc08/bsc08890/res/covid_19'\n\n if data_type == 'all':\n community_data = pd.read_pickle(os.path.join(\n _DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities', \n f'raw_data_communities_info.pickle'\n ))\n else:\n community_data = pd.read_pickle(os.path.join(\n _DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities', \n f'raw_data_{data_type}_type_communities_info.pickle'\n ))\n\n df = pd.concat([\n pd.DataFrame({\n 'cluster': f'cluster_{i}',\n 'gene': [el[: el.find(' ')] for el in vals.split('; ')][:top_n]\n }) for i, vals in community_data['all_sorted_genes'].iteritems()\n ], axis=0).reset_index(drop=True)\n \n else:\n \n if on_targets:\n \n print('Running EnrichR on targets between 3 group types..')\n \n types = ['C', 'M', 'S']\n \n df = pd.concat([\n pd.read_csv(\n f'/gpfs/home/bsc08/bsc08890/tmp/cohort_ana/tmp_enrichr_{data}_{t}_{choose_fixed_tf}_target_list.tsv', \n header=None, names=['gene']\n ).assign(cluster=t) for t in types\n ], axis=0)\n \n else:\n \n if group_types == 'all':\n print('Running EnrichR on TFs between 3 group types..')\n df = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{data}.tsv', sep='\\t')\n else:\n print('Running EnrichR on 2 group types..')\n if group_types == 'M_S':\n group_types = 'S_M'\n if group_types == 'C_M':\n group_types = 'M_C'\n if group_types == 'C_S':\n group_types = 'S_C'\n df_1 = 
pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{group_types}_{data}.tsv', sep='\\t')\n df_1['gene'] = df_1.index\n df_2 = df_1.copy()\n df_2['avg_log2FC'] = - df_2['avg_log2FC']\n df_1['cluster'], df_2['cluster'] = group_types.split('_')\n\n df = pd.concat([df_1, df_2], axis=0)\n\n\n if is_positive_markers:\n df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] > 1)]\n else:\n df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] < -1)]\n\n cluster_dfs = {}\n for cl in df['cluster'].unique():\n \n print(f'Processing {cl}..')\n\n ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList'\n genes_str = '\\n'.join(df[df['cluster'] == cl]['gene'])\n description = f\"{data}_{data_type}_{cl}\"\n \n if is_communities == True:\n filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv'\n elif on_targets:\n filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{choose_fixed_tf}_target_{cl}.tsv'\n elif group_types == 'all':\n filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv'\n else:\n filename = f'tmp/{out_folder}/tmp_enrichr_{data}_2_groups_{cl}.tsv'\n \n\n payload = {\n 'list': (None, genes_str),\n 'description': (None, description)\n }\n response = requests.post(ENRICHR_URL, files=payload)\n\n if not response.ok:\n raise Exception('Error analyzing gene list')\n\n job_id = json.loads(response.text)\n\n ################################################################################\n # Get enrichment results\n #\n ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/export'\n query_string = '?userListId=%s&filename=%s&backgroundType=%s'\n user_list_id = str(job_id['userListId'])\n gene_set_library = str(enrichr_library)\n url = ENRICHR_URL + query_string % (user_list_id, filename, gene_set_library)\n\n response = requests.get(url, stream=True)\n\n print(' Enrichr API : Downloading file of enrichment results: Job Id:', job_id)\n with open(filename, 'wb') as f:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n \n print(f' Saved to {filename}')\n \n cluster_dfs[cl] = pd.read_csv(filename, sep='\\t')\n\n return cluster_dfs\n \n\ndef betweenness_centrality_parallel(G, processes=None):\n \"\"\"Parallel betweenness centrality function\"\"\"\n from multiprocessing import Pool\n \n def chunks(l, n):\n \"\"\"Divide a list of nodes `l` in `n` chunks\"\"\"\n l_c = iter(l)\n while 1:\n x = tuple(itertools.islice(l_c, n))\n if not x:\n return\n yield x\n \n p = Pool(processes=processes)\n node_divisor = len(p._pool) * 4\n node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))\n num_chunks = len(node_chunks)\n bt_sc = p.starmap(\n nx.betweenness_centrality_subset,\n zip(\n [G] * num_chunks,\n node_chunks,\n [list(G)] * num_chunks,\n [True] * num_chunks,\n ['distance'] * num_chunks\n ),\n )\n\n # Reduce the partial solutions\n bt_c = bt_sc[0]\n for bt in bt_sc[1:]:\n for n in bt:\n bt_c[n] += bt[n]\n return 
bt_c\n\n\n"},"repo_name":{"kind":"string","value":"masyahook/Single-cell-gene-regulatory-networks"},"sub_path":{"kind":"string","value":"scGRN/func.py"},"file_name":{"kind":"string","value":"func.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":43101,"string":"43,101"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":371,"cells":{"seq_id":{"kind":"string","value":"28031461245"},"text":{"kind":"string","value":"#!/usr/bin/python3\n\nfrom time import sleep\nfrom datetime import date, datetime\nfrom pynput.keyboard import Key, Controller\nfrom logging.handlers import RotatingFileHandler\nimport sys, signal, argparse, logging, platform, subprocess\n\n# ----------------------------------Configuration--------------------------------\nVOLUME = \"0.3\"\nBREAK_NUM = 1\nWORK_DURATION = 900\nBREAK_DURATION = 120\n\nMAC = False\nLINUX = False\nWINDOWS = False\n\nLINUX_PATH = \"\"\nMAC_PATH = \"/Users/mutnawaz/Desktop/Muteeb/Code/timer/\"\nWINDOWS_PATH = \"C:\\\\Users\\\\Muteeb\\\\Desktop\\\\RV Major Project\\\\Personal\\\\timer\\\\\"\n\n# ---------------------------------end of Configuration---------------------------\n\nlog = None\n\n\ndef __init_logger():\n global log\n if log is not None:\n log.debug(\"logger already initialized.\")\n return None\n\n try:\n \"log format \"\n log_formatter = logging.Formatter(\"%(levelname)5.5s %(filename)5s#%(lineno)3s %(message)s\")\n\n \"Refer the log file path\"\n PATH = get_path()\n log_file = PATH + \"timer.log\"\n\n \"Max size of the log file is 2MB, it rotate if size exceeds\"\n handler = RotatingFileHandler(\n log_file,\n mode=\"a\",\n maxBytes=(2 * 1024 * 1024),\n backupCount=4,\n encoding=None,\n delay=0,\n )\n\n \"appy the log format and level\"\n handler.setFormatter(log_formatter)\n handler.setLevel(logging.DEBUG)\n log = logging.getLogger(\"timer.log\")\n log.setLevel(logging.DEBUG)\n\n \"apply the settings to the log\"\n log.addHandler(handler)\n log.debug(\"Start logging the times\")\n return handler\n\n except Exception as e:\n log.error(\"Failed to create logger: %s\", str(e))\n\n\ndef exit_handler(sig, frame):\n print(\"\\nGood bye. Have a nice day!\\n\")\n greet()\n sys.exit(0)\n\n\ndef greet():\n try:\n print(subprocess.check_output(\"motivate\", shell=True, stderr=subprocess.DEVNULL).decode())\n except:\n print(\"\\n******************************************************\")\n print(\"* *\")\n print(\"* *\")\n print(\"* You can do it! 
Sending lots of energy to you :) *\")\n print(\"* *\")\n print(\"* *\")\n print(\"******************************************************\")\n\n\ndef get_time():\n now = datetime.now()\n time = now.strftime(\"%H:%M:%S\")\n return time\n\n\ndef play_sound(sound_file):\n if MAC:\n subprocess.check_output(\"afplay --volume \" + VOLUME + \" {}\".format(sound_file), shell=True)\n elif LINUX:\n subprocess.check_output(\"aplay -q {}&\".format(sound_file), shell=True)\n else:\n winsound.PlaySound(sound_file, winsound.SND_ASYNC)\n\n\ndef get_path():\n if MAC:\n return MAC_PATH\n elif LINUX:\n return LINUX_PATH\n else:\n return WINDOWS_PATH\n\n\ndef display_sleep():\n if MAC:\n # subprocess.check_output(\"pmset displaysleepnow\", shell=True) # Put system to sleep.\n subprocess.check_output(\"open -a ScreenSaverEngine\", shell=True)\n\n\ndef wakeup():\n if MAC:\n # subprocess.check_output(\"pmset relative wake 1\", shell=True) # Wakeup the system.\n # log.debug(\"Waking up.\")\n keyboard = Controller()\n key = Key.esc\n\n keyboard.press(key)\n keyboard.release(key)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--slient\", action=\"store_true\", help=\"Run in silent mode.\")\n args = vars(parser.parse_args())\n\n if platform.system() == \"linux\" or platform.system() == \"linux2\":\n LINUX = True\n elif platform.system() == \"darwin\" or platform.system() == \"Darwin\":\n MAC = True\n elif platform.system() == \"win32\" or platform.system() == \"Windows\":\n WINDOWS = True\n if not args[\"slient\"]:\n try:\n import winsound\n except Exception as e:\n print(\"Sound is not supported in windows. Reason: {0}\".format(e))\n args[\"slient\"] = True\n\n __init_logger()\n PATH = get_path()\n signal.signal(signal.SIGINT, exit_handler)\n greet()\n\n if args[\"slient\"]:\n print(\"Running in slient mode...\")\n\n log.info(\"Today's date: {0}\".format(date.today()))\n if not args[\"slient\"]:\n play_sound(PATH + \"start_timer.wav\")\n\n while True:\n\n log.info(\"Work number {0}, start time {1}\".format(BREAK_NUM, get_time()))\n sleep(WORK_DURATION)\n log.info(\"Work number {0}, end time {1}\".format(BREAK_NUM, get_time()))\n if not args[\"slient\"]:\n play_sound(PATH + \"take_break.wav\")\n\n display_sleep()\n\n log.info(\"Break number {0}, start time {1}\".format(BREAK_NUM, get_time()))\n sleep(BREAK_DURATION)\n log.info(\"Break number {0}, end time {1}\".format(BREAK_NUM, get_time()))\n if not args[\"slient\"]:\n play_sound(PATH + \"two_mins_up.wav\")\n\n wakeup()\n BREAK_NUM += 1\n"},"repo_name":{"kind":"string","value":"muteebakram/Timer"},"sub_path":{"kind":"string","value":"main.py"},"file_name":{"kind":"string","value":"main.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5198,"string":"5,198"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":372,"cells":{"seq_id":{"kind":"string","value":"22791755556"},"text":{"kind":"string","value":"import sys\nsys.path.insert(0, '../../class')\n\nimport os\nimport time\nimport nnet\nimport cubelattice as cl\nimport multiprocessing\nfrom functools import partial\nfrom scipy.io import loadmat\nimport numpy as np\nimport argparse\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Verification Settings')\n 
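# CLI options: the safety property id, the indices n1/n2 that select which\n    # nets/neural_network_information_<n1><n2>.mat benchmark is loaded, and the --compute_unsafety switch.\n    # Illustrative call (not part of the original repo): python main.py --n1 2 --n2 3 --property 1\n    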
parser.add_argument('--property', type=str, default='1')\n parser.add_argument('--n1', type=int, default=2)\n parser.add_argument('--n2', type=int, default=3)\n parser.add_argument('--compute_unsafety', action='store_true')\n args = parser.parse_args()\n \n i = args.n1\n j = args.n2\n\n def verification(afv):\n safe = True\n return safe\n\n print(\"neural_network_\"+str(i)+str(j))\n nn_path = \"nets/neural_network_information_\"+str(i)+str(j)+\".mat\"\n filemat = loadmat(nn_path)\n if not os.path.isdir('logs'):\n os.mkdir('logs')\n\n W = filemat['W'][0]\n b = filemat['b'][0]\n\n lb = [-0.1,-0.1,-0.1]\n ub = [0.1,0.1,0.1]\n\n nnet0 = nnet.nnetwork(W, b)\n nnet0.verification = verification\n initial_input = cl.cubelattice(lb, ub).to_lattice()\n cpus = multiprocessing.cpu_count()\n pool = multiprocessing.Pool(cpus)\n\n nnet0.start_time = time.time()\n nnet0.filename = \"logs/output_info\"+str(i)+str(j)+'.txt'\n outputSets = []\n nputSets0 = nnet0.singleLayerOutput(initial_input, 0)\n pool.map(partial(nnet0.layerOutput, m=1), nputSets0)\n pool.close()\n elapsed_time = time.time() - nnet0.start_time\n\n print('time elapsed: %f seconds \\n' % elapsed_time)\n print('result: safe\\n')\n filex = open(nnet0.filename, 'w')\n filex.write('time elapsed: %f seconds \\n' % elapsed_time)\n filex.write('result: safe\\n')\n filex.close()\n\n"},"repo_name":{"kind":"string","value":"Shaddadi/veritex"},"sub_path":{"kind":"string","value":"examples/Microbenchmarks/main.py"},"file_name":{"kind":"string","value":"main.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1739,"string":"1,739"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":10,"string":"10"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":373,"cells":{"seq_id":{"kind":"string","value":"24044811304"},"text":{"kind":"string","value":"#compare parameter between abc-smc\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom scipy import stats\nfrom matplotlib.colors import LogNorm, Normalize\nfrom scipy.signal import argrelextrema\n\n\nfilename=[\"ACDC_X2\",\"ACDC_Y2\",\"ACDC_Z2\"]#,\"ACDC_all\"]\n#filename=['ACDC_X2']\nfilename=['ACDC_X2','ACDC_X21ind']\nn=['final']\n#n=['1','2','3','4','5','6','7','8','9','10','11','12','final']#'13','14','15','final']\n#n=['1','2','3','4','5','6','7','8','9','10','11','12','13','final']#,'12','13','14','final']#,'15']#,'final']\n\npath='C:/Users/Administrator/Desktop/Modeling/AC-DC/'\npath='/users/ibarbier/AC-DC/'\nsys.path.insert(0, path + filename[0])\n\nimport model_equation as meq\n \nparlist=meq.parlist\n\nnamelist=[]\nfor i,par in enumerate(parlist):\n namelist.append(parlist[i]['name'])\n\n\npar0 = {\n 'K_ARAX':-3.5,#0.01,\n 'n_ARAX':2,\n 'K_XY':-2.5,\n 'n_XY':2,\n 'K_XZ':-1.55,#-1.25\n 'n_XZ':2,\n 'beta_X':1,\n 'alpha_X':0,\n 'delta_X':1,\n \n 'K_ARAY':-3.5,\n 'n_ARAY':2,\n 'K_YZ':-3.5,\n 'n_YZ':2,\n 'beta_Y':1,\n 'alpha_Y':0,\n 'delta_Y':1,\n \n 'K_ZX':-2.5, \n 'n_ZX':2,\n 'beta_Z':1,\n 'alpha_Z':0,\n 'delta_Z':1,\n\n 'beta/alpha_X':2,\n 'beta/alpha_Y':2,\n 'beta/alpha_Z':2\n\n\n}\n\ndef pars_to_dict(pars,parlist):\n### This function is not necessary, but it makes the code a bit easier to read,\n### it transforms an array of pars e.g. p[0],p[1],p[2] into a\n### named dictionary e.g. 
p['k0'],p['B'],p['n'],p['x0']\n### so it is easier to follow the parameters in the code\n dict_pars = {}\n for ipar,par in enumerate(parlist):\n dict_pars[par['name']] = pars[ipar] \n return dict_pars\n\ndef load(number= n,filename=filename,parlist=parlist):\n namelist=[]\n for i,par in enumerate(parlist):\n namelist.append(parlist[i]['name'])\n number=str(number)\n filepath = path+filename+'/smc/pars_' + number + '.out'\n dist_path = path+filename+'/smc/distances_' + number + '.out'\n raw_output= np.loadtxt(filepath)\n dist_output= np.loadtxt(dist_path)\n df = pd.DataFrame(raw_output, columns = namelist)\n df['dist']=dist_output\n df=df.sort_values('dist',ascending=False)\n distlist= sorted(df['dist'])\n p=[]\n for dist in distlist: \n p_0=df[df['dist']==dist]\n p0=[]\n for n in namelist:\n p0.append(p_0[n].tolist()[0]) \n p0=pars_to_dict(p0,parlist)\n p.append(p0) \n return p, df\n\n\n\ndef get_stats(filename,namelist):\n stats_df = pd.DataFrame( columns = ['par','file','mean','sd','mode'])\n parl = np.append(namelist,'dist')\n # for fi,fnm in enumerate(filename):\n fnm=filename[0]\n p,df= load(n[0],fnm,parlist)\n mean=np.mean(df).tolist()\n sd=np.std(df).tolist()\n mode=stats.mode(df)[0][0]\n new_row={'par':parl,'file':[fnm]*len(parl),'mean':mean,'sd':sd,'mode':mode}\n df2=pd.DataFrame(new_row)\n stats_df =stats_df.append(df2)\n return stats_df\n\n\ndef bar_plot(filename,namelist, t=\"mean\"):\n stats_df=get_stats(filename,namelist)\n # set width of bars\n barWidth = 0.20\n # Set position of bar on X axis\n r1 = np.arange(len(parl))\n\n #mean\n if t==\"mean\":\n for i,nm in enumerate(filename):\n v=stats_df[stats_df['method']==nm]\n plt.bar((r1+barWidth*i),v['mean'],yerr=v['sd'], capsize=2,width=barWidth, label=nm)\n\n plt.xlabel('par', fontweight='bold')\n plt.xticks([r + barWidth for r in range(len(parl))], parl)\n plt.legend()\n plt.show()\n\n #mode \n if t == \"mode\":\n for i,nm in enumerate(filename):\n v=stats_df[stats_df['method']==nm]\n plt.bar((r1+barWidth*i),v['mode'],width=barWidth, label=nm)\n\n plt.xlabel('par', fontweight='bold')\n plt.xticks([r + barWidth for r in range(len(parl))], parl)\n plt.legend()\n plt.show()\n\n\ndef plot_compare(n,filename,namelist):\n\n parl = np.append(namelist,'dist')\n index=1\n size=round(np.sqrt(len(parl)))\n for i,name in enumerate(parl):\n plt.subplot(size,size,index)\n plt.tight_layout()\n for fi,fnm in enumerate(filename):\n p,df= load(n,fnm,namelist1)\n sns.kdeplot(df[name],bw_adjust=.8,label=fnm)\n #plt.ylim(0,1)\n if i < (len(parl)-2):\n plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))\n index=index+1\n if index==5: \n plt.legend(bbox_to_anchor=(1.05, 1))\n\n #sns.kdeplot(df['K_XZ'])\n plt.savefig(str(filename)+str(n)+\"_compareplot.pdf\", bbox_inches='tight')\n plt.show()\n\n\n#plot_compare(n[0],filename,namelist)\n\n\ndef plot_alltime(filename,namelist):\n parl = np.append(namelist,'dist')\n index=1\n for i,name in enumerate(parl):\n plt.subplot(4,4,index)\n plt.tight_layout()\n for ni,nmbr in enumerate(n):\n p,df= load(nmbr,filename[0],parlist)\n sns.kdeplot(df[name],bw_adjust=.8,label=nmbr)\n #plt.ylim(0,1)\n if i < (len(parl)-2):\n plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))\n index=index+1\n #if index==5: \n plt.legend(bbox_to_anchor=(1.05, 1))\n plt.show()\n\n#plot_alltime(['ACDC_X2'],namelist)\n\ndef plotdistpar(filename,namelist):\n index=1\n for ni,nb in enumerate(n):\n p,df= load(nb,filename[0],parlist)\n for i,name in enumerate(namelist):\n plt.subplot(len(n),len(namelist),index)\n # 
plt.tight_layout()\n plt.scatter(df['dist'],df[name],s=1)\n mean=np.mean(df[name]).tolist()\n mode=stats.mode(df[name])[0][0]\n\n plt.plot([0,40],[mean,mean],'r',label=\"mean\")\n plt.plot([0,40],[mode,mode],'g',label=\"meode\")\n\n plt.ylim((parlist[i]['lower_limit'],parlist[i]['upper_limit']))\n plt.ylabel(name)\n index=index+1\n plt.legend(bbox_to_anchor=(1.05, 1))\n plt.show()\n\n\n'''\nARA=np.logspace(-4.5,-2.,10,base=10)\np,df= load(n[0],filename[0],parlist)\n\nstdf=get_stats(filename,namelist)\npmean=pars_to_dict(stdf['mean'])\npmode=pars_to_dict(stdf['mode'])\n\n\nfor i,p in enumerate([p[0],pmean,pmode,p[999]]):\n\n X,Y,Z=meq.model(ARA,p)\n df_X=pd.DataFrame(X,columns=ARA)\n df_Y=pd.DataFrame(Y,columns=ARA)\n df_Z=pd.DataFrame(Z,columns=ARA)\n\n plt.subplot(4,3,(1+3*i))\n sns.heatmap(df_X, cmap=\"Reds\")\n plt.subplot(4,3,(2+3*i))\n sns.heatmap(df_Y, cmap ='Blues')\n plt.subplot(4,3,(3+3*i))\n sns.heatmap(df_Z, cmap ='Greens')\n\nplt.show()\n\n\nX,Y,Z=meq.model(ARA,pmode)\nplt.plot(X[:,0],label=\"DCoff\")\nplt.plot(X[:,3],label=\"AC1\")\nplt.plot(X[:,6],label=\"AC2\")\nplt.plot(X[:,9],label=\"DCon\")\nplt.plot([200,200],[0,1000],'--')\n\nplt.legend(bbox_to_anchor=(1.05, 1))\nplt.tight_layout()\nplt.show()\n'''\n\n#####1indvs2ind\ndef plotdesnity1vs2():\n p2,df2= load('final','ACDC_X2',parlist)\n parlist1=parlist.copy()\n del parlist1[7:9]\n p1,df1= load('final','ACDC_X21ind',parlist1)\n\n namelist=[]\n for i,par in enumerate(parlist1):\n namelist.append(par['name'])\n \n parl = np.append(namelist,'dist')\n index=1\n for i,name in enumerate(parl):\n plt.subplot(4,4,index)\n plt.tight_layout()\n\n sns.kdeplot(df1[name],bw_adjust=.8,label='X_1ind')\n sns.kdeplot(df2[name],bw_adjust=.8,label='X_2ind')\n #plt.ylim(0,1)\n if i < (len(parl)-2):\n plt.xlim((parlist1[i]['lower_limit'],parlist1[i]['upper_limit']))\n index=index+1\n if index==5: \n plt.legend(bbox_to_anchor=(1.05, 1))\n\n #sns.kdeplot(df['K_XZ'])\n plt.savefig(\"1vs2ind\"+str(n[0])+\"_compareplot.pdf\", bbox_inches='tight')\n #plt.show()\n \nplotdesnity1vs2()\n\ndef ind1vs2indmeanandmode():\n p2,df2= load('final','ACDC_X',parlist)\n df2=df2.drop(columns=['K_ARAY', 'n_ARAY'])\n mean_df2=np.mean(df2)\n sd_df2=np.std(df2)\n mode_df2=stats.mode(df2)[0][0]\n parlist1=parlist.copy()\n del parlist1[7:9]\n p1,df1= load('12','ACDC_1ind',parlist1)\n mean_df1=np.mean(df1)\n sd_df1=np.std(df1)\n mode_df1=stats.mode(df1)[0][0]\n\n namelist=[]\n for i,par in enumerate(parlist1):\n namelist.append(par['name']) \n parl = np.append(namelist,'dist')\n # set width of bars\n barWidth = 0.30\n # Set position of bar on X axis\n r1 = np.arange(len(parl))\n plt.bar((r1+barWidth*0),mean_df1,yerr=sd_df1, capsize=2,width=barWidth, label=\"1ind\")\n plt.bar((r1+barWidth*1),mean_df2,yerr=sd_df2, capsize=2,width=barWidth, label=\"2ind\")\n plt.xlabel('par', fontweight='bold')\n plt.xticks([r + barWidth for r in range(len(parl))], parl)\n plt.legend()\n plt.show()\n plt.bar((r1+barWidth*0),mode_df1,width=barWidth, label=\"1ind\")\n plt.bar((r1+barWidth*1),mode_df2,width=barWidth, label=\"2ind\")\n plt.xlabel('par', fontweight='bold')\n plt.xticks([r + barWidth for r in range(len(parl))], parl)\n plt.legend()\n plt.show()\n\n\n\n\n\n\ndef calculateSS(ARA,parUsed):\n #sort ss according to their stabilitz\n #create stability list of shape : arabinose x steady x X,Y,Z\n unstable=np.zeros((len(ARA),3,3))\n stable=np.zeros((len(ARA),3,3))\n oscillation=np.zeros((len(ARA),3,3))\n unstable[:]=np.nan\n stable[:]=np.nan\n oscillation[:]=np.nan\n\n for ai,a in 
enumerate(ARA):\n ss=meq.findss(a,parUsed)\n if len(ss) > 3:\n print(\"error: more than 3 steadystates\")\n else:\n d = b = c=0 # can replace a,b,c by si, but allow to have osccilation on the same level\n for si,s in enumerate(ss):\n e=meq.stability(a,parUsed,[s])[0][0]\n if all(e<0):\n stable[ai][d]=s\n d+=1\n if any(e>0):\n pos=e[e>0]\n if len(pos)==2:\n if pos[0]-pos[1] == 0:\n oscillation[ai][b]=s\n b+=1\n else:\n unstable[ai][c]=s\n c+=1\n else:\n unstable[ai][c]=s \n c+=1 \n return unstable,stable,oscillation\n\n\n#chose parameter\ndef bifurcation(parUsed=None):\n p,df= load('final','ACDC_X2',parlist)\n #parUsed=par0\n if parUsed == None:\n parUsed=p[0]\n ARA=np.logspace(-4.5,-2.,20,base=10)\n ss=meq.findss(ARA[0],parUsed)[0]\n #print(ss)\n init=[ss[0],ss[1],ss[2]]\n X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init)\n df_X=pd.DataFrame(X[500:],columns=ARA)\n sns.heatmap(df_X, cmap=\"Reds\", norm=LogNorm())\n plt.show()\n\n xss,yss,zss = calculateSScurve(ARA,parUsed)\n\n\n\n maxX=[]\n minX=[]\n maxY=[]\n minY=[]\n maxZ=[]\n minZ=[]\n # X,Y,Z=meq.model(ARA,parUsed,totaltime=400)\n delta=10e-5\n for i in np.arange(0,len(ARA)):\n min_x=[np.nan,np.nan,np.nan]\n max_x=[np.nan,np.nan,np.nan]\n ss=meq.findss(ARA[i],parUsed)\n for si,s in enumerate(ss):\n init=[s[0]+delta,s[1]+delta,s[2]+delta]\n X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init)\n # print(max(X[200:,i]))\n\n max_x[si]=max(X[200:,i])\n min_x[si]=min(X[200:,i])\n\n \n\n\n\n maxX.append(max_x)\n minX.append(min_x)\n\n \n # minX.append(min(X[200:,i]))\n maxY.append(max(Y[200:,i]))\n minY.append(min(Y[200:,i]))\n maxZ.append(max(Z[200:,i]))\n minZ.append(min(Z[200:,i]))\n plt.subplot(3,1,1)\n plt.plot(ARA,xss,'--o')\n plt.plot(ARA,maxX,'-b')\n plt.plot(ARA,minX,'-g')\n #plt.fill_between(ARA,maxX,minX,alpha=0.2,facecolor='red')\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.subplot(3,1,2)\n plt.plot(ARA,yss,'--b')\n # plt.plot(ARA,maxY,'-b')\n # plt.plot(ARA,minY,'-b')\n # plt.fill_between(ARA,maxY,minY,alpha=0.2,facecolor='blue')\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.subplot(3,1,3)\n plt.plot(ARA,zss,'--g')\n # plt.plot(ARA,maxZ,'-g')\n # plt.plot(ARA,minZ,'-g')\n # plt.fill_between(ARA,maxZ,minZ,alpha=0.2,facecolor='green')\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.show()\n\n\ndef getlimitcycle(ARA,ssl,par,tt=500):\n M=np.ones((len(ARA),3,3))*np.nan\n m=np.ones((len(ARA),3,3))*np.nan\n delta=10e-5\n transient=500\n for ai,a in enumerate(ARA):\n ss=ssl[ai]\n for si,s in enumerate(ss):\n if any(np.isnan(s)) == False:\n init=[s[0]+delta,s[1]+delta,s[2]+delta]\n X,Y,Z=meq.model([a],par,totaltime=tt,init=init)\n M[ai,si,0]=max(X[transient:])\n M[ai,si,1]=max(Y[transient:])\n M[ai,si,2]=max(Z[transient:])\n m[ai,si,0]=min(X[transient:])\n m[ai,si,1]=min(Y[transient:])\n m[ai,si,2]=min(Z[transient:])\n\n max_list=argrelextrema(X[transient:], np.greater)\n maxValues=X[transient:][max_list]\n min_list=argrelextrema(X[transient:], np.less)\n minValues=X[transient:][min_list]\n\n maximaStability = abs(maxValues[-2]-minValues[-2])-(maxValues[-3]-minValues[-3])\n if maximaStability > 0.01:\n print(\"limit cycle not achieved for ARA[\"+str(ai)+\"]:\" + str(a) + \" at st.s:\"+ str(s))\n\n return M,m\n\n\n\n\n\ndef bifurcation_plot(n,filename):\n p,df= load(n,filename,parlist)\n ARA=np.logspace(-4.5,-2.,200,base=10)\n un,st,osc=calculateSS(ARA,p[1])\n M,m=getlimitcycle(ARA,osc,p[1],tt=500)\n for i,col in enumerate(['r','b','g']):\n plt.subplot(3,1,i+1)\n plt.plot(ARA,un[:,:,i],'--'+col)\n 
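# unstable steady states are drawn as dashed lines, the stable branches just below are solid,\n        # and the limit-cycle envelope (M/m from getlimitcycle) is shaded with fill_between\n        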
plt.plot(ARA,st[:,:,i],'-'+col)\n plt.plot(ARA,osc[:,:,i],'--'+col)\n plt.fill_between(ARA,M[:,0,i],m[:,0,i],alpha=0.2,facecolor=col)\n plt.fill_between(ARA,M[:,1,i],m[:,1,i],alpha=0.2,facecolor=col)\n plt.fill_between(ARA,M[:,2,i],m[:,2,i],alpha=0.2,facecolor=col)\n plt.yscale(\"log\")\n plt.xscale(\"log\")\n plt.show()\n\n\n#bifurcation(p[1])\n"},"repo_name":{"kind":"string","value":"icvara/AC-DC"},"sub_path":{"kind":"string","value":"compareplot.py"},"file_name":{"kind":"string","value":"compareplot.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":14082,"string":"14,082"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":374,"cells":{"seq_id":{"kind":"string","value":"2416692184"},"text":{"kind":"string","value":"from pygame import *\nfrom random import randrange\nfrom math import *\n\nfrom Pong.GameStats import GameStats\nfrom Pong.Player.Goal import Goal\nfrom Pong.Player.PlayerRacket import PlayerRacket\n\n\nclass Ball:\n\n MAX_SPEED_Y = 12\n SPEED_X = 6\n COLOR = (int(255), int(255), int(255))\n RADIUS: int = 10\n WIN_SCORE = 10\n\n def __init__(self, players):\n self.velocity = (Ball.SPEED_X, randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y))\n self.pos = (int(GameStats.width/2), int(GameStats.height/2))\n self.players = players\n\n def update_move(self):\n # if there is collision\n self.pos = (self.velocity[0] + self.pos[0], self.pos[1] + self.velocity[1])\n self.collision_update()\n if self.pos[0] < -5 or self.pos[0] > 640:\n self.pos = (320, 320)\n self.velocity = (Ball.SPEED_X, randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y))\n elif self.pos[1] < 0 or self.pos[1] > GameStats.height:\n self.velocity = (self.velocity[0], -self.velocity[1])\n\n def draw(self, surface):\n self.update_move()\n draw.circle(surface, Ball.COLOR, self.pos, Ball.RADIUS)\n\n def collision_update(self):\n col_pos = (0, 0)\n col_body = None\n collision = False\n\n for p in [self.players[0].racket, self.players[1].racket, self.players[0].goal, self.players[1].goal]:\n for point in ((self.pos[0] + Ball.RADIUS*cos(theta*0.01), self.pos[1] + Ball.RADIUS*sin(theta*0.01))\n for theta in range(0, int(pi*2*100))):\n if p[0] < point[0] < p[0] + p[2] and \\\n p[1] < point[1] < p[1] + p[3]:\n col_pos = point\n col_body = p\n collision = True\n break\n if collision:\n break\n if collision:\n if type(col_body) is PlayerRacket:\n self.velocity = (-self.velocity[0], int((col_pos[1] - col_body[1] -\n col_body[3]/2)/col_body[3]*Ball.MAX_SPEED_Y*2))\n elif type(col_body) is Goal:\n if self.players[0].goal == col_body:\n if self.players[0].score() == self.WIN_SCORE:\n self.players[0].reset()\n self.players[1].reset()\n\n if self.players[1].goal == col_body:\n if self.players[1].score() == self.WIN_SCORE:\n self.players[0].reset()\n self.players[1].reset()\n self.pos = (GameStats.width//2, GameStats.height//2)\n self.velocity = ((Ball.SPEED_X * ((-1) ** randrange(2))), randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y))\n\n def __getitem__(self, key):\n return 
self.pos[key]\n\n"},"repo_name":{"kind":"string","value":"dogancanalgul/Pong"},"sub_path":{"kind":"string","value":"ball.py"},"file_name":{"kind":"string","value":"ball.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2820,"string":"2,820"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":375,"cells":{"seq_id":{"kind":"string","value":"11047304211"},"text":{"kind":"string","value":"'''----------------------------------------------------------------------------\n\nengine.py\n\n\n\n----------------------------------------------------------------------------'''\n\nfrom engine.ssc.image_ini import *\nimport numpy as np\n#import sunpy.instr.aia\n\n\ndef standard_multitype_ini(observations):\n '''Standard initialization for different kind of observation. The\n initialization contains ratiation, limb darkening correction, Bz\n estimation and limb out region remove.\n\n Parameter\n ---------\n observations - Sunpy map object, it can contain multiple images.\n\n Return\n ------\n observations - Sunpy map object with modified data'''\n\n # Create a new list for the initialized observations\n initialized_observations = []\n\n for obs in observations:\n\n if obs.detector == 'HMI':\n # Replace np.nan-s with zero for rotating\n obs._data = np.nan_to_num(obs.data)\n\n # Rotate the observations\n obs = obs.rotate()\n\n # Limb darkening correction, only HIM white lighe image\n if obs.measurement == 'continuum':\n obs = dark_limb.limb_darkening_correct(obs, limb_cut=0.99)\n\n # Longitudinal magnetic field to Bz estimation\n if obs.measurement == 'magnetogram':\n obs = blbz.LOS2Bz(obs)\n\n # Cut the limb and replace outlimb region with np.nan\n obs = cut.solar_limb(obs)\n\n #if obs.detector == 'AIA':\n # Processes a level 1 AIAMap into a level 1.5 AIAMap\n #obs = sunpy.instr.aia.aiaprep(obs)\n\n # Append the new maps\n initialized_observations.append(obs)\n\n # Delete raw observations\n del observations\n\n return initialized_observations\n"},"repo_name":{"kind":"string","value":"gyengen/SheffieldSolarCatalog"},"sub_path":{"kind":"string","value":"engine/initialisation.py"},"file_name":{"kind":"string","value":"initialisation.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1753,"string":"1,753"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":376,"cells":{"seq_id":{"kind":"string","value":"70747391228"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 28 12:54:38 2016\r\n\r\n@author: Kylin\r\n\"\"\"\r\nimport math \r\nimport quyu\r\nimport erfenbijin\r\nimport pylab as pl\r\n \r\na = 200\r\nRx = 10\r\nRy = 20\r\nV0 = 100\r\ntheta = math.pi/5\r\ndt = 0.1\r\nVx = V0*math.cos(theta)\r\nVy = V0*math.sin(theta)\r\nR_x = []\r\nV_x = []\r\ni = 0 \r\nwhile 1 :\r\n Rx=Rx+Vx*dt\r\n Ry=Ry+Vy*dt\r\n if Ry*(Ry-Vy*dt)<0:\r\n k=(Ry-0)/(0-(Ry-Vy*dt))\r\n x0=((1+k)*Rx-k*Vx*dt)/(1+k)\r\n R_x+=[x0]\r\n V_x+=[Vx]\r\n if quyu.inZhengfangxing(Rx,Ry,a)==1\\\r\n or quyu.inZhengfangxing(Rx,Ry,a)==0:\r\n continue\r\n if quyu.inZhengfangxing(Rx,Ry,a)==-1:\r\n 
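# the ball has left the square: bisect (erfenbijin) between the previous and the current\r\n        # position to step back onto the wall before the reflection is applied\r\n        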
x1=Rx-Vx*dt\r\n y1=Ry-Vy*dt\r\n x2=Rx\r\n y2=Ry\r\n t=erfenbijin.Zhengfangxing_erFenbijin(x1,y1,x2,y2,0,a)\r\n Rx=t[0]\r\n Ry=t[1]\r\n #continue\r\n if quyu.inZhengfangxing(Rx,Ry,a)==0:\r\n if (Rx== a or Rx==-a) and Ry>-a and Ry-a and Rx10000:\r\n break\r\npl.plot(R_x, V_x,\"o\",label=\"Vx-Rx\")\r\npl.title(u\"正方形\".encode(\"gb2312\"))\r\npl.xlabel('Rx')\r\npl.ylabel('Vx')\r\npl.legend()\r\npl.show()\r\n\r\n"},"repo_name":{"kind":"string","value":"52kylin/compuational_physics_N2014301020034"},"sub_path":{"kind":"string","value":"exercise_09_new/code/zfxvx.py"},"file_name":{"kind":"string","value":"zfxvx.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1459,"string":"1,459"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":377,"cells":{"seq_id":{"kind":"string","value":"27251269716"},"text":{"kind":"string","value":"\"\"\"\n文件名: Code/Chapter05/C01_ConfigManage/E02_Config.py\n创建时间: 2023/2/26 3:47 下午\n作 者: @空字符\n公众号: @月来客栈\n知 乎: @月来客栈 https://www.zhihu.com/people/the_lastest\n\"\"\"\n\nimport os\n\n\nclass ModelConfig(object):\n def __init__(self,\n train_file_path=os.path.join('data', 'train.txt'),\n val_file_path=os.path.join('data', 'val.txt'),\n test_file_path=os.path.join('data', 'test.txt'),\n split_sep='_!_',\n is_sample_shuffle=True,\n batch_size=16,\n learning_rate=3.5e-5,\n max_sen_len=None,\n num_labels=3,\n epochs=5):\n self.train_file_path = train_file_path\n self.val_file_path = val_file_path\n self.test_file_path = test_file_path\n self.split_sep = split_sep\n self.is_sample_shuffle = is_sample_shuffle\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.max_sen_len = max_sen_len\n self.num_labels = num_labels\n self.epochs = epochs\n\n#\ndef train(config):\n dataset = get_dataset(config)\n model = get_mode(config)\n\n\nif __name__ == '__main__':\n config = ModelConfig(epochs=10)\n print(f\"epochs = {config.epochs}\")\n # train(config)\n"},"repo_name":{"kind":"string","value":"moon-hotel/DeepLearningWithMe"},"sub_path":{"kind":"string","value":"Code/Chapter05/C01_ConfigManage/E02_Config.py"},"file_name":{"kind":"string","value":"E02_Config.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1326,"string":"1,326"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":116,"string":"116"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":378,"cells":{"seq_id":{"kind":"string","value":"70402167867"},"text":{"kind":"string","value":"import const\nimport sys, os\nimport string\nimport random\n\n\nQUESTION_TOOL='What are the tools used in the attack?'\nQUESTION_GROUP='Who is the attack group?'\n\nINPUT_FILE='input/sample_attack_report_raw.txt'\nTRAIN_RATE=0.8\nVUL_RATE=0.1\nLABEL_TRAIN='train'\nLABEL_VAL='dev'\nLABEL_TEST='test'\n\nSENTENSE_DELIMETER=\". 
\"\nWORD_DELIMETER=\" \"\nLAVEL_GROUP='B-AT'\nLAVEL_I_GROUP='I-AT'\nLAVEL_TOOL='B-TL'\nLAVEL_I_TOOL='I-TL'\nLAVEL_SEC='B-SC'\nLAVEL_I_SEC='I-SC'\nLAVEL_COM='B-CM'\nLAVEL_I_COM='I-CM'\nLAVEL_OTHER='O'\nDATASET_DELIMETER=\"\\t\"\nTRAIN_FILE='train.txt'\nVAL_FILE='dev.txt'\nTEST_FILE='test.txt'\nMAX_WORD_NUM=200\nMAX_WORD=1000\nNUM_SENTENSE_PER_ROW=100\nLONG_SENTENSE='long.txt'\nO_RATE=1\nEXCLUSIVE_LIST=['at']\nLEN_RANDOM=10\nalldataset={}\n\ndef get_tools():\n tools=[]\n with open(const.OUT_TOOL_FILE, 'r') as file:\n for row in file:\n tool = row.replace(const.NEWLINE, \"\")\n #tool = tool.lower()\n tools.append(tool)\n return tools\n\ndef get_groups():\n groups=[]\n with open(const.OUT_GROUP_FILE, 'r') as file:\n for row in file:\n group = row.replace(const.NEWLINE, \"\")\n #group=group.lower()\n groups.append(group)\n return groups\n\ndef get_sectors():\n sectors=[]\n with open(const.OUT_SECTOR_FILE, 'r') as file:\n for row in file:\n sector = row.replace(const.NEWLINE, \"\")\n #sector=sector.lower()\n sectors.append(sector)\n return sectors\n\ndef get_companies():\n companies=[]\n with open(const.OUT_COMPANY_FILE, 'r') as file:\n for row in file:\n company = row.replace(const.NEWLINE, \"\")\n #company=company.lower()\n companies.append(company)\n return companies\n\ndef random_str(word):\n dat = string.digits + string.ascii_lowercase + string.ascii_uppercase\n return ''.join([random.choice(dat) for i in range(len(word))]).lower()\n\ndef get_random_TOOL(start,end):\n index=random.randint(start,end)\n tool=tools[index]\n name=tool.split(\" \")[0]\n return name\n\ndef get_random_TA(start,end):\n index=random.randint(start,end)\n ta_name=groups[index]\n name = ta_name.split(\" \")[0]\n return name\n\ndef create_dataset(mode,num_dataset, start_a, end_a, start_t, end_t):\n\n cnt=0\n\n data=[]\n data_O=[]\n data_tag = []\n\n if mode == LABEL_TRAIN:\n data=lines[:num_train-1]\n elif mode==LABEL_VAL:\n data=lines[num_train:num_train+num_val]\n\n else:\n data = lines[num_train+num_val:]\n\n for row in data:\n print(\"cnt: \"+str(cnt))\n if cnt>num_dataset:\n print(\"Exceed \"+str(num_data))\n return\n\n sentenses = row.split(SENTENSE_DELIMETER)\n #print(str(len(sentenses)))\n for sentense in sentenses:\n words= sentense.split(WORD_DELIMETER)\n if len(words) >=MAX_WORD_NUM:\n # with open(LONG_SENTENSE, \"a\", encoding='utf8') as out_sentense:\n # out_sentense.write(sentense + const.NEWLINE)\n continue\n\n len_word=0\n for word in words:\n len_word=len_word+len(word)\n if len_word >= MAX_WORD:\n continue\n\n\n prev=''\n prev_org=''\n dataset=[]\n index=0\n for word in words:\n lavel = LAVEL_OTHER\n word=word.strip()\n tmp_word = word\n\n # groups\n if tmp_word in groups:\n lavel=LAVEL_GROUP\n\n elif prev+WORD_DELIMETER+tmp_word in groups:\n lavel = LAVEL_I_GROUP\n prev_org = get_random_TA(start_a, end_a)\n dataset[index-1]=prev_org + DATASET_DELIMETER + LAVEL_GROUP + const.NEWLINE\n\n # tools\n\n elif tmp_word in tools and tmp_word.lower() not in EXCLUSIVE_LIST:\n lavel=LAVEL_TOOL\n\n elif prev + WORD_DELIMETER + tmp_word in tools:\n lavel = LAVEL_I_TOOL\n prev_org = get_random_TOOL(start_t,end_t)\n dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_TOOL + const.NEWLINE\n\n # # sectors\n # elif tmp_word in sectors:\n # lavel = LAVEL_SEC\n #\n # elif prev + WORD_DELIMETER + tmp_word in sectors:\n # lavel = LAVEL_I_SEC\n # dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_SEC + const.NEWLINE\n #\n # # companies\n # elif tmp_word in companies:\n # lavel = LAVEL_COM\n #\n # elif prev + 
WORD_DELIMETER + tmp_word in companies:\n # lavel = LAVEL_I_COM\n # dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_COM + const.NEWLINE\n\n if lavel ==LAVEL_GROUP or lavel==LAVEL_I_GROUP:\n word=get_random_TA(start_a, end_a)\n word=word\n\n elif lavel ==LAVEL_TOOL or lavel==LAVEL_I_TOOL:\n word=get_random_TOOL(start_t,end_t)\n word = word\n\n\n dataset.append(word + DATASET_DELIMETER + lavel + const.NEWLINE)\n prev=tmp_word\n prev_org=word\n index=index+1\n\n num_data=0\n for item in dataset:\n label=item.split(DATASET_DELIMETER)[1].strip()\n if label!=LAVEL_OTHER:\n num_data=num_data+1\n\n if num_data == 0:\n data_O.append(dataset)\n\n else:\n data_tag.append(dataset)\n\n cnt = cnt + 1\n\n O_num = len(data_O)\n max_O_num = int(O_num* O_RATE)\n\n alldataset[mode]=data_tag+data_O[:max_O_num]\n\n return(mode)\n\nwith open(INPUT_FILE, 'r') as file:\n lines = file.readlines()\n\n\ncontext=len(lines)\nprint(\"total context:\" +str(context))\n\nif len(sys.argv)>1:\n context = int(sys.argv[1])\n\nnum_train=round(context*TRAIN_RATE)\nnum_val=round(context*VUL_RATE)\nnum_test=context-num_train-num_val\n\nprint(\"num_train:\" +str(num_train))\nprint(\"num_val:\" +str(num_val))\nprint(\"num_test:\" +str(num_test))\n\ntools=get_tools()\ngroups=get_groups()\n# sectors=get_sectors()\n# companies=get_companies()\n\ntrain_ta_end=round(len(groups)*TRAIN_RATE)\ndev_ta_end=train_ta_end+round(len(groups)*VUL_RATE)\ntest_ta_end=len(groups)-1\n\ntrain_tl_end=round(len(tools)*TRAIN_RATE)\ndev_tl_end=train_tl_end+round(len(tools)*VUL_RATE)\ntest_tl_end=len(tools)-1\n\nif os.path.exists(TRAIN_FILE):\n os.remove(TRAIN_FILE)\n\nif os.path.exists(VAL_FILE):\n os.remove(VAL_FILE)\n\nif os.path.exists(TEST_FILE):\n os.remove(TEST_FILE)\n\nif os.path.exists(LONG_SENTENSE):\n os.remove(LONG_SENTENSE)\n\ncreate_dataset(LABEL_TRAIN, num_train,0,train_ta_end,0,train_tl_end)\ncreate_dataset(LABEL_VAL, num_val,train_ta_end+1,dev_ta_end,train_tl_end+1,dev_tl_end)\ncreate_dataset(LABEL_TEST, num_test,dev_ta_end+1,test_ta_end,dev_tl_end+1,test_tl_end)\n\nwith open(LABEL_TRAIN + '.txt', \"a\", encoding='utf8') as out:\n for dataset in alldataset[LABEL_TRAIN]:\n out.writelines(dataset)\n out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)\n\nwith open(LABEL_VAL + '.txt', \"a\", encoding='utf8') as out:\n for dataset in alldataset[LABEL_VAL]:\n out.writelines(dataset)\n out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)\n\nwith open(LABEL_TEST + '.txt', \"a\", encoding='utf8') as out:\n for dataset in alldataset[LABEL_TEST]:\n out.writelines(dataset)\n out.write('.' 
+ DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)"},"repo_name":{"kind":"string","value":"gamzattirev/Ahogrammer"},"sub_path":{"kind":"string","value":"create_dataset.py"},"file_name":{"kind":"string","value":"create_dataset.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":7721,"string":"7,721"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":379,"cells":{"seq_id":{"kind":"string","value":"44379710290"},"text":{"kind":"string","value":"from random import randint\ny=int(randint(1,10))\nfor i in range(3):\n x = int(input(\"猜数字:\\n\"))\n if x >y:\n print(\"大了\")\n elif x Output: 1\n[(0,30),(5,10),(15,20) (21 22) (21 28) ] -> Explanation: Room1: (0,30) Room2: (5,10),(15,20) -> Output: 2\n(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)\n\nexamples\n(0,30), \n(5,10), (15,20), (21 22)\n(21 28)\n\n(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)\nassumptions\napproaches\n1)\n(0,30), \n(5,22),\n(21 28)\n\n2)\n(0, 30), (0, 10), (5,15), (11, 20), (17, 25), (21,30)\n0 1 2 3 4 5\n0 30\ncount: 1\n\ncreate a res array\nfor any new interval, look in res for a place where int has no intersection. this space defines a room!\n\ntradeoffs\nthis appears to be the only way\n'''\nfrom typing import List, Tuple\ndef roomcount(times: List[Tuple[int, int]]) -> int:\n '''\n s1------e1\n s2-------e2\n '''\n def intersects(start1, end1, start2, end2):\n return min(end1, end2) > max(start1, start2) \n \n def no_intersects(lis):\n for int_ in lis:\n if intersects(*int_, start, end): # return true if they touch?\n return False\n return True\n \n rooms = []\n for start, end in times:\n for lis in rooms:\n if no_intersects(lis):\n lis.append((start, end))\n break\n else:\n rooms.append([(start, end)])\n return len(rooms)\n \nints = [(2,7)] # -> Output: 1\nprint(roomcount(ints))\nints = [(0,30),(5,10),(15,20), (21, 22), (21, 28) ] #3\nprint(roomcount(ints))\nints = [(0,30),(5,10),(15,20),(21, 22), (22, 28) ] #2\nprint(roomcount(ints))"},"repo_name":{"kind":"string","value":"soji-omiwade/cs"},"sub_path":{"kind":"string","value":"dsa/before_rubrik/minimum_rooms.py"},"file_name":{"kind":"string","value":"minimum_rooms.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1937,"string":"1,937"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":382,"cells":{"seq_id":{"kind":"string","value":"19040286888"},"text":{"kind":"string","value":"from typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nfrom rl_nav import constants\nfrom rl_nav.environments import wrapper\n\ntry:\n import cv2\n import matplotlib\n from matplotlib import cm\n from matplotlib import pyplot as plt\n from mpl_toolkits.axes_grid1 import make_axes_locatable\nexcept ModuleNotFoundError:\n raise AssertionError(\n \"To use visualisation wrapper, further package requirements \"\n \"need to be satisfied. 
Please consult README.\"\n )\n\n\nclass VisualisationEnv(wrapper.Wrapper):\n\n COLORMAP = cm.get_cmap(\"plasma\")\n NORMALISE = False\n\n def __init__(self, env):\n super().__init__(env=env)\n\n def render(\n self,\n save_path: Optional[str] = None,\n dpi: Optional[int] = 60,\n format: str = \"state\",\n ) -> None:\n \"\"\"Method to render environment.\n\n Args:\n save_path: optional path to which to save image.\n dpi: optional pixel.\n format: state of environment to render.\n \"\"\"\n if format == constants.STATE:\n assert (\n self._env.active\n ), \"To render map with state, environment must be active.\"\n \"call reset_environment() to reset environment and make it active.\"\n \"Else render stationary environment skeleton using format='stationary'\"\n if save_path:\n fig = plt.figure()\n plt.imshow(\n self._env._env_skeleton(\n rewards=format,\n agent=format,\n ),\n origin=\"lower\",\n )\n fig.savefig(save_path, dpi=dpi)\n else:\n plt.imshow(\n self._env._env_skeleton(\n rewards=format,\n agent=format,\n ),\n origin=\"lower\",\n )\n\n def visualise_episode_history(\n self, save_path: str, history: Union[str, List[np.ndarray]] = \"train\"\n ) -> None:\n \"\"\"Produce video of episode history.\n\n Args:\n save_path: name of file to be saved.\n history: \"train\", \"test\" to plot train or test history,\n else provide an independent history.\n \"\"\"\n if isinstance(history, str):\n if history == constants.TRAIN:\n history = self._env.train_episode_history\n elif history == constants.TEST:\n history = self._env.test_episode_history\n elif history == constants.TRAIN_PARTIAL:\n history = self._env.train_episode_partial_history\n elif history == constants.TEST_PARTIAL:\n history = self._env.test_episode_partial_history\n\n SCALING = 20\n FPS = 30\n\n map_shape = history[0].shape\n frameSize = (SCALING * map_shape[1], SCALING * map_shape[0])\n\n out = cv2.VideoWriter(\n filename=save_path,\n fourcc=cv2.VideoWriter_fourcc(\"m\", \"p\", \"4\", \"v\"),\n fps=FPS,\n frameSize=frameSize,\n )\n\n for frame in history:\n bgr_frame = frame[..., ::-1].copy()\n flipped_frame = np.flip(bgr_frame, 0)\n scaled_up_frame = np.kron(flipped_frame, np.ones((SCALING, SCALING, 1)))\n out.write((scaled_up_frame * 255).astype(np.uint8))\n\n out.release()\n\n def _plot_normalised_heatmap_over_env(\n self, heatmap: Dict[Tuple[int, int], float], save_name: str\n ):\n split_save_name = save_name.split(\".pdf\")[0]\n save_name = f\"{split_save_name}_normalised.pdf\"\n environment_map = self._env._env_skeleton(\n rewards=None,\n agent=None,\n )\n\n all_values = list(heatmap.values())\n current_max_value = np.max(all_values)\n current_min_value = np.min(all_values)\n\n for position, value in heatmap.items():\n # remove alpha from rgba in colormap return\n # normalise value for color mapping\n environment_map[position[::-1]] = self.COLORMAP(\n (value - current_min_value) / (current_max_value - current_min_value)\n )[:-1]\n\n fig = plt.figure()\n plt.imshow(environment_map, origin=\"lower\", cmap=self.COLORMAP)\n plt.colorbar()\n fig.savefig(save_name, dpi=60)\n plt.close()\n\n def _plot_unnormalised_heatmap_over_env(\n self, heatmap: Dict[Tuple[int, int], float], save_name: str\n ):\n environment_map = self._env._env_skeleton(\n rewards=None,\n agent=None,\n )\n\n for position, value in heatmap.items():\n # remove alpha from rgba in colormap return\n environment_map[position[::-1]] = self.COLORMAP(value)[:-1]\n\n fig = plt.figure()\n plt.imshow(environment_map, origin=\"lower\", cmap=self.COLORMAP)\n plt.colorbar()\n 
fig.savefig(save_name, dpi=60)\n plt.close()\n\n def plot_heatmap_over_env(\n self,\n heatmap: Dict[Tuple[int, int], float],\n save_name: str,\n ) -> None:\n \"\"\"plot quantities over top of environmen (e.g. value function)\n\n Args:\n heatmap: data to plot; dictionary of states (keys) and quantities (values).\n fig: figure on which to plot.\n ax: axis on which to plot.\n save_name: path to which to save plot.\n \"\"\"\n self._plot_unnormalised_heatmap_over_env(heatmap=heatmap, save_name=save_name)\n self._plot_normalised_heatmap_over_env(heatmap=heatmap, save_name=save_name)\n\n def plot_numbered_values_over_env(\n self, values: Dict[Tuple[int], np.ndarray], save_name: str\n ):\n fig = plt.figure()\n environment_map = self._env._env_skeleton(\n rewards=None,\n agent=None,\n )\n plt.imshow(environment_map, origin=\"lower\", cmap=self.COLORMAP)\n all_states = list(values.keys())\n for state, action_values in values.items():\n for i, action_value in enumerate(action_values):\n if all_states[i] != state:\n xytext = np.array(state) + 0.2 * (\n np.array(all_states[i]) - np.array(state)\n )\n plt.annotate(\n f\"{i}: {round(action_value, 2)}\",\n xy=state,\n xytext=xytext,\n arrowprops={\n \"headlength\": 2,\n \"headwidth\": 2,\n \"width\": 0.5,\n \"linewidth\": 0.1,\n },\n color=\"y\",\n size=5,\n )\n else:\n plt.annotate(\n i,\n xy=state,\n color=\"g\",\n size=5,\n )\n fig.savefig(save_name, dpi=60)\n plt.close()\n"},"repo_name":{"kind":"string","value":"philshams/Euclidean_Gridworld_RL"},"sub_path":{"kind":"string","value":"rl_nav/environments/visualisation_env.py"},"file_name":{"kind":"string","value":"visualisation_env.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":6969,"string":"6,969"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":383,"cells":{"seq_id":{"kind":"string","value":"12423871357"},"text":{"kind":"string","value":"__author__ = \"Vanessa Sochat, Alec Scott\"\n__copyright__ = \"Copyright 2021-2023, Vanessa Sochat and Alec Scott\"\n__license__ = \"Apache-2.0\"\n\nimport json\nimport os\nimport re\nimport shlex\nimport subprocess\n\nimport pakages.builders.spack.cache as spack_cache\nimport pakages.client\nimport pakages.oras\nimport pakages.utils\nfrom pakages.logger import logger\n\n\nclass SpackClient(pakages.client.PakagesClient):\n \"\"\"\n Pakages has a main controller for interacting with pakages.\n \"\"\"\n\n def parse_package_request(self, packages):\n \"\"\"\n Parse the packages and repo (if any) from it.\n This is shared between install and build\n \"\"\"\n # By defualt, assume not adding a repository\n repo = None\n\n if not isinstance(packages, list):\n packages = shlex.split(packages)\n\n # Case 1: we have an install directed at the present working directory\n if packages and packages[0] == \".\":\n repo = os.getcwd()\n packages.pop(0)\n\n # If we have a path (akin to the first)\n if packages and os.path.exists(packages[0]):\n repo = packages.pop(0)\n\n # OR if we have a github URI TODO, can clone here\n if packages and re.search(\"(http|https)://github.com\", packages[0]):\n repo = packages.pop(0)\n\n # If we don't have packages and we have a repo, derive from PWD\n if repo and not packages:\n for path in pakages.utils.recursive_find(repo, \"package.py\"):\n 
packages.append(os.path.basename(os.path.dirname(path)))\n\n # Finally, add the repository\n if repo:\n self.add_repository(repo)\n\n return packages\n\n def list_installed(self):\n \"\"\"\n List installed packages\n \"\"\"\n command = [\"spack\", \"find\"]\n for line in pakages.utils.stream_command(command):\n print(line.strip(\"\\n\"))\n\n command = [\"spack\", \"find\", \"--json\"]\n result = pakages.utils.run_command(command)\n return json.loads(result[\"message\"])\n\n def build(self, packages, cache_dir=None, key=None, **kwargs):\n \"\"\"\n Build a package into a cache\n \"\"\"\n packages = self.parse_packages(packages)\n\n # Prepare a cache directory\n cache = spack_cache.BuildCache(\n spec_name=packages,\n cache_dir=cache_dir or self.settings.cache_dir,\n username=self.settings.username,\n email=self.settings.email,\n settings=self.settings,\n )\n\n # Install all packages\n self._install(packages)\n cache.create(packages, key=key)\n\n # Push function is on cache, if desired\n return cache\n\n def parse_packages(self, packages):\n \"\"\"\n Helper function to ensure we return consistent names.\n \"\"\"\n packages = self.parse_package_request(packages)\n if isinstance(packages, list):\n packages = packages[0]\n if \" \" in packages:\n logger.exit(\"We currently only support one package for build.\")\n logger.info(f\"Preparing package {packages}\")\n return packages\n\n def add_repository(self, path):\n \"\"\"\n Add a repository.\n\n Given a path that exists, add the repository to the\n underlying spack. If you need to add a GitHub uri, create a\n pakages.repo.PakRepo first.\n \"\"\"\n try:\n command = [\"spack\", \"repo\", \"add\", path]\n for line in pakages.utils.stream_command(command):\n logger.info(line.strip(\"\\n\"))\n except subprocess.CalledProcessError as e:\n if \"Repository is already registered\" in e.output:\n pass\n else:\n raise e\n\n def download_cache(self, target, download_dir=None):\n \"\"\"\n Download a target to a cache download directory\n \"\"\"\n download_dir = download_dir or pakages.utils.get_tmpdir()\n reg = pakages.oras.get_oras_client()\n\n # This will error if not successful, result is a list of files\n reg.pull(target=target, outdir=download_dir)\n return download_dir\n\n def install(self, packages, **kwargs):\n \"\"\"\n Install one or more packages.\n \"\"\"\n packages = self.parse_packages(packages)\n use_cache = kwargs.get(\"use_cache\", False)\n if use_cache:\n cache_dir = self.download_cache(use_cache)\n cache = spack_cache.BuildCache(\n packages, cache_dir=cache_dir, settings=self.settings\n )\n\n # Cache is named after target, this is a filesystem mirror\n cache.add_as_mirror(re.sub(\"(-|:|/)\", \"-\", use_cache))\n\n # Prepare install command with or without cache\n command = [\"spack\", \"install\"]\n if use_cache:\n command.append(\"--use-cache\")\n if isinstance(packages, list):\n command.append(\" \".join(packages))\n else:\n command.append(packages)\n\n # Install packages using system spack - we aren't responsible for this working\n for line in pakages.utils.stream_command(command):\n logger.info(line.strip(\"\\n\"))\n\n def _install(self, packages):\n \"\"\"\n Install one or more packages.\n\n This eventually needs to take into account using the GitHub packages bulid cache\n \"\"\"\n # Install packages using system spack - we aren't responsible for this working\n for line in pakages.utils.stream_command([\"spack\", \"install\", packages]):\n logger.info(line.strip(\"\\n\"))\n\n def uninstall(self, packages):\n \"\"\"\n Uninstall a spack 
package\n \"\"\"\n for line in pakages.utils.stream_command([\"spack\", \"uninstall\", packages]):\n logger.info(line.strip(\"\\n\"))\n"},"repo_name":{"kind":"string","value":"syspack/pakages"},"sub_path":{"kind":"string","value":"pakages/builders/spack/client.py"},"file_name":{"kind":"string","value":"client.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5794,"string":"5,794"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":2,"string":"2"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":384,"cells":{"seq_id":{"kind":"string","value":"13914723162"},"text":{"kind":"string","value":"import sys\nimport oneflow as flow\nimport oneflow.typing as tp\nimport argparse\nimport numpy as np\nimport os\nimport shutil\nimport json\nfrom typing import Tuple\n\nfrom textcnn import TextCNN\n\nsys.path.append(\"../..\")\nfrom text_classification.utils import pad_sequences, load_imdb_data\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--ksize_list', type=str, default='2,3,4,5')\nparser.add_argument('--n_filters', type=int, default=100)\nparser.add_argument('--emb_dim', type=int, default=100)\nparser.add_argument('--dropout', type=float, default=0.5)\nparser.add_argument('--lr', type=float, default=1e-4)\nparser.add_argument('--sequence_length', type=int, default=150)\nparser.add_argument('--batch_size', type=int, default=32)\nparser.add_argument('--model_load_dir', type=str, default='')\nparser.add_argument('--model_save_every_n_iter', type=int, default=1000)\nparser.add_argument('--n_steps', type=int, default=10000)\nparser.add_argument('--n_epochs', type=int, default=15)\nparser.add_argument('--model_save_dir', type=str, default='./best_model')\n\nargs = parser.parse_args()\nassert ',' in args.ksize_list\nargs.ksize_list = [int(n) for n in args.ksize_list.split(',')]\nargs.emb_num = 50000\nargs.n_classes = 2\n\nmodel = TextCNN(\n args.emb_num, args.emb_dim,\n ksize_list=args.ksize_list,\n n_filters_list=[args.n_filters] * len(args.ksize_list),\n n_classes=args.n_classes, dropout=args.dropout)\n\n\ndef get_train_config():\n config = flow.function_config()\n config.default_data_type(flow.float)\n return config\n\n\ndef get_eval_config():\n config = flow.function_config()\n config.default_data_type(flow.float)\n return config\n\n\n@flow.global_function('train', get_train_config())\ndef train_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32),\n label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32)\n ) -> tp.Numpy:\n with flow.scope.placement(\"gpu\", \"0:0\"):\n logits = model.get_logits(text, is_train=True)\n loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name=\"softmax_loss\")\n lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [args.lr])\n flow.optimizer.Adam(lr_scheduler).minimize(loss)\n return loss\n\n\n@flow.global_function('predict', get_eval_config())\ndef eval_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32),\n label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32)\n ) -> Tuple[tp.Numpy, tp.Numpy]:\n with flow.scope.placement(\"gpu\", \"0:0\"):\n logits = model.get_logits(text, is_train=False)\n loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name=\"softmax_loss\")\n\n return label, logits\n\n\ndef suffle_batch(data, 
label, batch_size):\n permu = np.random.permutation(len(data))\n data, label = data[permu], label[permu]\n\n batch_n = len(data) // batch_size\n\n x_batch = np.array([data[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32)\n y_batch = np.array([label[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32)\n\n return x_batch, y_batch\n\n\ndef acc(labels, logits, g):\n predictions = np.argmax(logits, 1)\n right_count = np.sum(predictions == labels)\n g[\"total\"] += labels.shape[0]\n g[\"correct\"] += right_count\n\n\ndef train(checkpoint):\n path = '../imdb'\n (train_data, train_labels), (test_data, test_labels) = load_imdb_data(path)\n\n with open(os.path.join(path, 'word_index.json')) as f:\n word_index = json.load(f)\n word_index = {k: (v + 2) for k, v in word_index.items()}\n word_index[\"\"] = 0\n word_index[\"\"] = 1\n\n train_data = pad_sequences(train_data, value=word_index[\"\"], padding='post', maxlen=args.sequence_length)\n test_data = pad_sequences(test_data, value=word_index[\"\"], padding='post', maxlen=args.sequence_length)\n\n best_accuracy = 0.0\n best_epoch = 0\n for epoch in range(1, args.n_epochs + 1):\n print(\"[Epoch:{}]\".format(epoch))\n data, label = suffle_batch(train_data, train_labels, args.batch_size)\n for i, (texts, labels) in enumerate(zip(data, label)):\n loss = train_job(texts, labels).mean()\n if i % 20 == 0:\n print(loss)\n\n data, label = suffle_batch(test_data, test_labels, args.batch_size)\n g = {\"correct\": 0, \"total\": 0}\n for i, (texts, labels) in enumerate(zip(data, label)):\n labels, logits = eval_job(texts, labels)\n acc(labels, logits, g)\n\n accuracy = g[\"correct\"] * 100 / g[\"total\"]\n print(\"[Epoch:{0:d} ] accuracy: {1:.1f}%\".format(epoch, accuracy))\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n best_epoch = epoch\n if not os.path.exists(args.model_save_dir):\n os.mkdir(args.model_save_dir)\n else:\n shutil.rmtree(args.model_save_dir)\n assert not os.path.exists(args.model_save_dir)\n os.mkdir(args.model_save_dir)\n print(\"Epoch:{} save best model.\".format(best_epoch))\n checkpoint.save(args.model_save_dir)\n\n print(\"Epoch:{} get best accuracy:{}\".format(best_epoch, best_accuracy))\n\n\nif __name__ == '__main__':\n checkpoint = flow.train.CheckPoint()\n checkpoint.init()\n train(checkpoint)\n"},"repo_name":{"kind":"string","value":"Oneflow-Inc/oneflow_nlp_model"},"sub_path":{"kind":"string","value":"text_classification/textcnn/train_textcnn.py"},"file_name":{"kind":"string","value":"train_textcnn.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5411,"string":"5,411"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":385,"cells":{"seq_id":{"kind":"string","value":"8246901300"},"text":{"kind":"string","value":"\"\"\"\nModule containing the rheologies, fault setup, and ODE cycles code\nfor the 2D subduction case.\n\"\"\"\n\n# general imports\nimport json\nimport configparser\nimport numpy as np\nimport pandas as pd\nfrom scipy.integrate import solve_ivp\nfrom numba import njit, objmode, float64, int64, boolean\nfrom scipy.interpolate import interp1d\nfrom warnings import warn\nfrom abc import ABC\n\n# seqeas imports\nfrom .kernels2d import Glinedisp, Klinedisp\n\n\nclass Rheology(ABC):\n 
\"\"\"\n Abstract base class for rheologies.\n \"\"\"\n\n\nclass NonlinearViscous(Rheology):\n r\"\"\"\n Implement a nonlinear viscous fault rheology, where the velocity :math:`v` is\n :math:`v = \\tau^n / \\alpha_n` given the shear stress :math:`\\tau`, a strength\n constant :math:`\\alpha_n`, and a constant exponent :math:`n`.\n \"\"\"\n\n def __init__(self, n, alpha_n, n_mid=None, alpha_n_mid=None, mid_transition=None,\n n_deep=None, alpha_n_deep=None, deep_transition=None,\n deep_transition_width=None, n_boundary=None, alpha_n_boundary=None):\n r\"\"\"\n Setup the rheology parameters for a given fault.\n\n Parameters\n ----------\n alpha_n : float\n Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m]\n n : float\n Power-law exponent :math:`n` [-]\n \"\"\"\n # input check\n assert not np.logical_xor(deep_transition is None, deep_transition_width is None)\n # set number of variables\n self.n_vars = 2\n \"\"\" Number of variables to track by rheology [-] \"\"\"\n # initialization\n self._n = float(n)\n self._n_mid = float(n_mid) if n_mid is not None else self.n\n self._n_deep = float(n_deep) if n_deep is not None else self.n_mid\n self.n_boundary = float(n_boundary) if n_boundary is not None else self.n_deep\n \"\"\" Power-law exponent :math:`n` [-] \"\"\"\n self.alpha_n = float(alpha_n)\n self.alpha_n_mid = (float(alpha_n_mid) if alpha_n_mid is not None\n else self.alpha_n)\n self.alpha_n_deep = (float(alpha_n_deep) if alpha_n_deep is not None\n else self.alpha_n_mid)\n self.alpha_n_boundary = (float(alpha_n_boundary) if alpha_n_boundary is not None\n else self.alpha_n_deep)\n r\"\"\" Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m] \"\"\"\n self.mid_transition = None if mid_transition is None else float(mid_transition)\n \"\"\" Depth [m] for the middle transition point \"\"\"\n self.deep_transition = None if deep_transition is None else float(deep_transition)\n \"\"\" (Upper) Depth [m] for the deep transition point \"\"\"\n self.deep_transition_width = (None if deep_transition_width is None\n else float(deep_transition_width))\n \"\"\" (Downdip) Width [m] of the deep transition point \"\"\"\n\n @property\n def alpha_n(self):\n r\"\"\" Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m] \"\"\"\n return self._alpha_n\n\n @alpha_n.setter\n def alpha_n(self, alpha_n):\n self._alpha_n = float(alpha_n)\n self._A = self.calc_A(self._alpha_n, self._n)\n\n @property\n def alpha_n_mid(self):\n r\"\"\" Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m] \"\"\"\n return self._alpha_n_mid\n\n @alpha_n_mid.setter\n def alpha_n_mid(self, alpha_n_mid):\n self._alpha_n_mid = float(alpha_n_mid)\n self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid)\n\n @property\n def alpha_n_deep(self):\n r\"\"\" Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m] \"\"\"\n return self._alpha_n_deep\n\n @alpha_n_deep.setter\n def alpha_n_deep(self, alpha_n_deep):\n self._alpha_n_deep = float(alpha_n_deep)\n self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep)\n\n @property\n def n(self):\n \"\"\" Power-law exponent :math:`n` [-] \"\"\"\n return self._n\n\n @n.setter\n def n(self, n):\n self._n = float(n)\n self._A = self.calc_A(self._alpha_n, self._n)\n\n @property\n def n_mid(self):\n \"\"\" Power-law exponent :math:`n` [-] \"\"\"\n return self._n_mid\n\n @n_mid.setter\n def n_mid(self, n_mid):\n self._n_mid = float(n_mid)\n self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid)\n\n 
@property\n def n_deep(self):\n \"\"\" Power-law exponent :math:`n` [-] \"\"\"\n return self._n_deep\n\n @n_deep.setter\n def n_deep(self, n_deep):\n self._n_deep = float(n_deep)\n self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep)\n\n @property\n def A(self):\n r\"\"\" Rescaled strength term :math:`A = \\alpha_n^{1/n}` [Pa * (s/m)^(1/n)] \"\"\"\n return self._A\n\n @property\n def A_mid(self):\n r\"\"\" Rescaled strength term :math:`A = \\alpha_n^{1/n}` [Pa * (s/m)^(1/n)] \"\"\"\n return self._A_mid\n\n @property\n def A_deep(self):\n r\"\"\" Rescaled strength term :math:`A = \\alpha_n^{1/n}` [Pa * (s/m)^(1/n)] \"\"\"\n return self._A_deep\n\n @staticmethod\n def calc_A(alpha_n, n):\n \"\"\" Calculate A from alpha_n and n \"\"\"\n return alpha_n ** (1 / n)\n\n def get_param_vectors(self, patch_depths, v_eff):\n r\"\"\"\n Calculate the depth-dependent arrays of :math:`\\alpha_n`, :math:`n`, and :math:`A`,\n assuming :math:`\\alpha_n` and :math:`\\alpha_{n,eff}` vary log-linearly with depth,\n and :math:`n` adapts between the transition points.\n \"\"\"\n assert np.all(np.diff(patch_depths) >= 0)\n # start knots list\n knots = [patch_depths[0]]\n vals_alpha_n = [self.alpha_n]\n vals_n = [self.n]\n # add optional mid transition\n if self.mid_transition is not None:\n knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))])\n vals_alpha_n.append(self.alpha_n_mid)\n vals_n.append(self.n_mid)\n # add optional deep transition\n if self.deep_transition is not None:\n knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))])\n vals_alpha_n.append(self.alpha_n_deep)\n vals_n.append(self.n_deep)\n knots.append(patch_depths[np.argmin(np.abs(patch_depths\n - self.deep_transition\n - self.deep_transition_width))])\n vals_alpha_n.append(self.alpha_n_boundary)\n vals_n.append(self.n_boundary)\n # add final value\n knots.append(patch_depths[-1])\n vals_alpha_n.append(self.alpha_n_boundary)\n vals_alpha_n = np.array(vals_alpha_n)\n vals_n.append(self.n_boundary)\n vals_n = np.array(vals_n)\n vals_alpha_eff = SubductionSimulation.get_alpha_eff(vals_alpha_n, vals_n, v_eff)\n # interpolate alpha_n and alpha_eff\n alpha_n_vec = 10**interp1d(knots, np.log10(vals_alpha_n))(patch_depths)\n alpha_eff_vec = 10**interp1d(knots, np.log10(vals_alpha_eff))(patch_depths)\n # get n and A\n n_vec = SubductionSimulation.get_n(alpha_n_vec, alpha_eff_vec, v_eff)\n A_vec = alpha_n_vec ** (1 / n_vec)\n return alpha_n_vec, n_vec, A_vec\n\n\nclass RateStateSteadyLogarithmic(Rheology):\n r\"\"\"\n Implement a steady-state rate-and-state rheology using the ageing law (effectively\n becoming a rate-dependent rheology) with velocity in logarithmic space defined by\n\n :math:`f_{ss} = f_0 + (a - b) * \\zeta = \\tau / \\sigma_E`\n\n where :math:`f_{ss}` is the steady-state friction, :math:`f_0` is a reference\n friction, :math:`a` and :math:`b` are the rate-and-state frictional parameters,\n :math:`\\zeta = \\log (v / v_0)` is the logarithmic velocity, :math:`\\tau` is the shear\n stress, and :math:`\\sigma_E` is the effective fault normal stress.\n \"\"\"\n\n def __init__(self, v_0, alpha_h, alpha_h_mid=None, mid_transition=None,\n alpha_h_deep=None, deep_transition=None, deep_transition_width=None,\n alpha_h_boundary=None):\n r\"\"\"\n Setup the rheology parameters for a given fault.\n\n Parameters\n ----------\n v_0 : float\n Reference velocity [m/s] used for the transformation into logarithmic space.\n alpha_h : float\n Rate-and-state parameter :math:`(a - b) * 
\\sigma_E`,\n where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties,\n and :math:`\\sigma_E` [Pa] is effective fault normal stress.\n \"\"\"\n self.alpha_h = float(alpha_h)\n r\"\"\" Rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa] \"\"\"\n # input check\n assert not np.logical_xor(deep_transition is None, deep_transition_width is None)\n assert float(v_0) > 0, \"RateStateSteadyLogarithmic needs to have positive v_0.\"\n # set number of variables\n self.n_vars = 2\n \"\"\" Number of variables to track by rheology [-] \"\"\"\n # initialization\n self.v_0 = float(v_0)\n \"\"\" Reference velocity :math:`v_0` [m/s] \"\"\"\n self.alpha_h = float(alpha_h)\n r\"\"\" Rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa] \"\"\"\n self.alpha_h_mid = (float(alpha_h_mid) if alpha_h_mid is not None\n else self.alpha_h)\n r\"\"\" Middle rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa] \"\"\"\n self.alpha_h_deep = (float(alpha_h_deep) if alpha_h_deep is not None\n else self.alpha_h_mid)\n r\"\"\" Deep rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa] \"\"\"\n self.alpha_h_boundary = (float(alpha_h_boundary) if alpha_h_boundary is not None\n else self.alpha_h_deep)\n r\"\"\" Boundary-layer rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa] \"\"\"\n self.mid_transition = None if mid_transition is None else float(mid_transition)\n \"\"\" Depth [m] for the middle transition point \"\"\"\n self.deep_transition = None if deep_transition is None else float(deep_transition)\n \"\"\" (Upper) Depth [m] for the deep transition point \"\"\"\n self.deep_transition_width = (None if deep_transition_width is None\n else float(deep_transition_width))\n \"\"\" (Downdip) Width [m] of the deep transition point \"\"\"\n\n def get_param_vectors(self, patch_depths):\n r\"\"\"\n Calculate the depth-dependent array of :math:`\\alpha_h`, assuming it\n varies log-linearly with depth.\n \"\"\"\n assert np.all(np.diff(patch_depths) >= 0)\n # start knots list\n knots = [patch_depths[0]]\n vals_alpha_h = [self.alpha_h]\n # add optional mid transition\n if self.mid_transition is not None:\n knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))])\n vals_alpha_h.append(self.alpha_h_mid)\n # add optional deep transition\n if self.deep_transition is not None:\n knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))])\n vals_alpha_h.append(self.alpha_h_deep)\n knots.append(patch_depths[np.argmin(np.abs(patch_depths\n - self.deep_transition\n - self.deep_transition_width))])\n vals_alpha_h.append(self.alpha_h_boundary)\n # add final value\n knots.append(patch_depths[-1])\n vals_alpha_h.append(self.alpha_h_boundary)\n vals_alpha_h = np.array(vals_alpha_h)\n # interpolate alpha_n and alpha_eff\n alpha_h_vec = 10**interp1d(knots, np.log10(vals_alpha_h))(patch_depths)\n return alpha_h_vec\n\n\n@njit(float64[:](float64[:], float64[:], float64[:], float64[:]), cache=True)\ndef dvdt_plvis(dtaudt, v, A, n):\n r\"\"\"\n Calculate the velocity derivative for a power-law viscous rheology.\n\n From :math:`v = \\tau^n / \\alpha_n` we get:\n\n :math:`\\frac{dv}{dt} = \\frac{n}{\\alpha_n} \\tau^{n-1} \\frac{d \\tau}{dt}`\n\n where\n\n :math:`\\tau^{n-1} = \\left( \\alpha_n v \\right)^{\\frac{n-1}{n}}`\n\n simplifying to\n\n :math:`\\frac{dv}{dt} = \\frac{n}{A} v^{1-\\frac{1}{n}} \\frac{d \\tau}{dt}`\n\n Parameters\n ----------\n dtaudt : numpy.ndarray\n 1D array of the shear stress derivative\n v : numpy.ndarray\n 1D array of the current 
velocity\n A : numpy.ndarray\n Rescaled nonlinear viscous rheology strength constant\n n : numpy.ndarray\n Power-law exponent\n\n Returns\n -------\n dvdt : numpy.ndarray\n 1D array of the velocity derivative.\n \"\"\"\n signs = np.sign(v)\n return (n / A) * (signs * v)**(1 - 1 / n) * dtaudt\n\n\n@njit(float64[:](float64[:], float64[:]), cache=True)\ndef dzetadt_rdlog(dtaudt, alpha_h_vec):\n r\"\"\"\n Return the velocity derivative in logarithmic space given the current traction\n rate in linear space.\n\n Taking the derivative of the steady-state friction gives an explicit\n formulation for the slip acceleration :math:`\\frac{d \\zeta}{dt}`:\n\n :math:`\\frac{df_{ss}}{dt} = (a-b) \\frac{d \\zeta}{dt}`\n\n Recognizing that :math:`\\tau = f_{ss} \\sigma_E` and assuming\n constant effective normal stress leads to\n :math:`\\frac{d \\tau}{dt} = \\sigma_E \\frac{df_{ss}}{dt}`, which\n can be rearranged to give the final expression\n\n :math:`\\frac{d \\zeta}{dt} = \\frac{1}{(a-b) \\sigma_E} \\frac{d \\tau}{dt}`\n\n Parameters\n ----------\n dtaudt : numpy.ndarray\n Traction derivative :math:`\\frac{d \\tau}{dt}` [Pa/s] in linear space\n alpha_h_vec : float\n Rate-and-state parameter :math:`(a - b) * \\sigma_E`\n\n Returns\n -------\n dzetadt : numpy.ndarray\n Velocity derivative :math:`\\frac{d \\zeta}{dt}` [1/s] in logarithmic space.\n \"\"\"\n return dtaudt / alpha_h_vec\n\n\n@njit(float64[:](float64[:], float64[:], float64[:], float64[:], float64[:]), cache=True)\ndef get_new_vel_plvis(v_minus, delta_tau, alpha_n, n, A):\n r\"\"\"\n Calculate the instantaneous velocity change due to an instantaneous stress change\n to the fault patches. It is derived from:\n\n :math:`\\tau_{+} = \\tau_{-} + \\Delta \\tau`\n\n and plugging in the relationship :math:`v = \\tau^n / \\alpha_n`, we get\n\n :math:`\\sqrt[n]{\\alpha_n v_{+}} = \\sqrt[n]{\\alpha_n v_{-}} + \\Delta \\tau`\n\n and finally\n\n :math:`v_{+} = \\frac{\\left( A \\sqrt[n]{v_{-}} + \\Delta \\tau \\right)^n}{\\alpha_n}`\n\n Parameters\n ----------\n v_minus : numpy.ndarray\n Initial velocity :math:`v_{-}` [m/s]\n delta_tau : numpy.ndarray\n Traction stress change :math:`\\Delta \\tau` [Pa]\n alpha_n : numpy.ndarray\n Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m]\n n : numpy.ndarray\n Power-law exponent :math:`n` [-]\n A : numpy.ndarray\n Rescaled strength term :math:`A = \\alpha_n^{1/n}` [Pa * (s/m)^(1/n)]\n\n Returns\n -------\n v_plus : numpy.ndarray\n Velocity :math:`v_{+}` [m/s] after stress change\n \"\"\"\n signs = np.sign(v_minus)\n temp = A * (signs * v_minus)**(1 / n) + (signs * delta_tau)\n return np.abs(temp) ** (n - 1) * temp / alpha_n * signs\n\n\n@njit(float64[:](float64[:], float64[:], float64[:]), cache=True)\ndef get_new_vel_rdlog(zeta_minus, delta_tau, alpha_h_vec):\n r\"\"\"\n Calculate the instantaneous velocity change (in logarithmic space) due to an\n instantaneous stress change to the fault patches. 
We can kickstart the\n derivatuion from the expression in ``RateStateSteadyLinear.get_new_vel``:\n\n :math:`\\log (v_{+}/v_0) = \\log (v_{-}/v_0) + \\Delta\\tau / \\alpha_h`\n\n and realize that we only have to plug in our definition for :math:`\\zeta`\n to give us the final result\n\n :math:`\\zeta_{+} = \\zeta_{-} + \\Delta\\tau / \\alpha_h`\n\n Parameters\n ----------\n zeta_minus : numpy.ndarray\n Initial velocity :math:`\\zeta_{-}` [-] in logarithmic space\n delta_tau : numpy.ndarray, optional\n Traction stress change :math:`\\Delta \\tau` [Pa]\n alpha_h_vec : numpy.ndarray\n Rate-and-state parameter :math:`(a - b) * \\sigma_E`\n\n Returns\n -------\n zeta_plus : numpy.ndarray\n Velocity :math:`\\zeta_{+}` [-] in logarithmic space after stress change\n\n See Also\n --------\n alpha_h\n \"\"\"\n return zeta_minus + delta_tau / alpha_h_vec\n\n\n@njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1],\n float64[:], float64[:], float64), cache=True)\ndef flat_ode_plvis(t, state, v_plate, K_int, K_ext, A_upper, n_upper, mu_over_2vs):\n r\"\"\"\n Flattened ODE derivative function for a subduction fault with\n powerlaw-viscous rheology in the upper plate interface, and an imposed\n constant plate velocity at the lower interface (which can be ignored).\n\n Parameters\n ----------\n t : float\n Current time (needs to be in function call for solve_ivp).\n state : numpy.ndarray\n 1D array with the current state of the creeping fault patches,\n containing (in order) the upper cumulative slip and upper velocity.\n v_plate : float\n Plate velocity.\n K_int : numpy.ndarray\n 2D array with the stress kernel mapping creeping patches to themselves.\n K_ext : numpy.ndarray\n 2D array with the stress kernel mapping the effect of the locked\n patches onto the creeping patches.\n A_upper : numpy.ndarray\n Upper plate interface rescaled nonlinear viscous rheology strength constant\n n_upper : numpy.ndarray\n Upper plate interface power-law exponent\n mu_over_2vs : float\n Radiation damping factor\n\n Returns\n -------\n dstatedt : numpy.ndarray\n 1D array with the state derivative.\n \"\"\"\n # get number of variables within state\n # (depends on rheology, so is hardcoded here)\n n_vars_upper = 2\n n_creeping_upper = state.size // n_vars_upper\n assert K_int.shape == (n_creeping_upper, n_creeping_upper)\n assert K_ext.shape[0] == n_creeping_upper\n # extract total velocities\n v = state[n_creeping_upper:]\n # get shear strain rate\n signs = np.sign(v)\n temp = mu_over_2vs * (n_upper / A_upper) * (signs * v)**(1 - 1 / n_upper)\n dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1)\n ) / (1 + temp)\n # get ODE\n dstatedt = np.concatenate((v, dvdt_plvis(dtaudt, v, A_upper, n_upper)))\n # return\n return dstatedt\n\n\n@njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1],\n float64, float64[:], float64), cache=True)\ndef flat_ode_rdlog(t, state, v_plate, K_int, K_ext, v_0, alpha_h_vec, mu_over_2vs):\n r\"\"\"\n Flattened ODE derivative function for a subduction fault with\n powerlaw-viscous rheology in the upper plate interface, and an imposed\n constant plate velocity at the lower interface (which can be ignored).\n\n Parameters\n ----------\n t : float\n Current time (needs to be in function call for solve_ivp).\n state : numpy.ndarray\n 1D array with the current state of the creeping fault patches,\n containing (in order) the upper cumulative slip and upper velocity.\n v_plate : float\n Plate velocity.\n K_int : numpy.ndarray\n 2D array with the 
stress kernel mapping creeping patches to themselves.\n K_ext : numpy.ndarray\n 2D array with the stress kernel mapping the effect of the locked\n patches onto the creeping patches.\n v_0 : float\n Reference velocity [m/s]\n alpha_h_vec : numpy.ndarray\n Rate-and-state parameter :math:`(a - b) * \\sigma_E`\n mu_over_2vs : float\n Radiation damping factor\n\n Returns\n -------\n dstatedt : numpy.ndarray\n 1D array with the state derivative.\n \"\"\"\n # get number of variables within state\n # (depends on rheology, so is hardcoded here)\n n_vars_upper = 2\n n_creeping_upper = state.size // n_vars_upper\n assert K_int.shape == (n_creeping_upper, n_creeping_upper)\n assert K_ext.shape[0] == n_creeping_upper\n # extract total velocities\n zeta = state[n_creeping_upper:]\n v = v_0 * np.exp(zeta)\n # get shear strain rate\n temp = mu_over_2vs * v / alpha_h_vec\n dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1)\n ) / (1 + temp)\n # get ODE\n dstatedt = np.concatenate((v, dzetadt_rdlog(dtaudt, alpha_h_vec)))\n # return\n return dstatedt\n\n\n@njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1],\n float64, float64, float64, float64), cache=True)\ndef flat_ode_plvis_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext,\n A_upper, n_upper, A_lower, n_lower):\n \"\"\"\n Flattened ODE derivative function for a subduction fault with\n powerlaw-viscous rheology in both the upper and lower plate interface.\n\n Parameters\n ----------\n t : float\n Current time (needs to be in function call for solve_ivp).\n state : numpy.ndarray\n 1D array with the current state of the creeping fault patches,\n containing (in order) the upper cumulative slip, upper velocity,\n lower cumulative slip, lower velocity.\n n_creeping_upper : int\n Number of creeping patches in the upper plate interface.\n The number of creeping patches in the lower plate interface can then\n be derived from the size of ``state``.\n v_plate_vec : float\n Initial velocity in all creeping patches.\n K_int : numpy.ndarray\n 2D array with the stress kernel mapping creeping patches to themselves.\n K_ext : numpy.ndarray\n 2D array with the stress kernel mapping the effect of the locked\n patches onto the creeping patches.\n A_upper : float\n Upper plate interface rescaled nonlinear viscous rheology strength constant\n n_upper : float\n Upper plate interface power-law exponent\n A_lower : float\n Lower plate interface rescaled nonlinear viscous rheology strength constant\n n_lower : float\n Lower plate interface power-law exponent\n\n Returns\n -------\n dstatedt : numpy.ndarray\n 1D array with the state derivative.\n \"\"\"\n # get number of variables within state\n # (depends on rheology, so is hardcoded here)\n n_vars_upper, n_vars_lower = 2, 2\n n_state_upper = n_vars_upper * n_creeping_upper\n n_state_lower = state.size - n_state_upper\n n_creeping_lower = n_state_lower // n_vars_lower\n n_creeping = n_creeping_lower + n_creeping_upper\n assert K_int.shape[0] == K_int.shape[1] == n_creeping\n assert K_ext.shape[0] == n_creeping\n # split up state\n state_upper = state[:n_state_upper]\n state_lower = state[n_state_upper:]\n # extract total velocities\n v_upper = state_upper[n_creeping_upper:]\n v_lower = state_lower[n_creeping_lower:]\n # get shear strain rate\n v = np.concatenate((v_upper, v_lower))\n dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1))\n dtaudt_upper = dtaudt[:n_creeping_upper]\n dtaudt_lower = dtaudt[n_creeping_upper:]\n # get individual 
rheologies' ODE\n dstatedt_upper = \\\n np.concatenate((v_upper, dvdt_plvis(dtaudt_upper, v_upper,\n np.ones_like(v_upper) * A_upper,\n np.ones_like(v_upper) * n_upper)))\n dstatedt_lower = \\\n np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower,\n np.ones_like(v_lower) * A_lower,\n np.ones_like(v_upper) * n_lower)))\n # concatenate and return\n return np.concatenate((dstatedt_upper, dstatedt_lower))\n\n\n@njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1],\n float64, float64, float64, float64), cache=True)\ndef flat_ode_rdlog_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext,\n v_0, alpha_h_upper, A_lower, n_lower):\n r\"\"\"\n Flattened ODE derivative function for a subduction fault with\n rate-dependent (log-space) rheology in the upper and nonlinear viscous\n rheology in the lower plate interface.\n\n Parameters\n ----------\n t : float\n Current time (needs to be in function call for solve_ivp).\n state : numpy.ndarray\n 1D array with the current state of the creeping fault patches,\n containing (in order) the upper cumulative slip, upper velocity,\n lower cumulative slip, lower velocity.\n n_creeping_upper : int\n Number of creeping patches in the upper plate interface.\n The number of creeping patches in the lower plate interface can then\n be derived from the size of ``state``.\n v_plate_vec : float\n Initial velocity in all creeping patches.\n K_int : numpy.ndarray\n 2D array with the stress kernel mapping creeping patches to themselves.\n K_ext : numpy.ndarray\n 2D array with the stress kernel mapping the effect of the locked\n patches onto the creeping patches.\n v_0 : float\n Reference velocity [m/s]\n alpha_h_upper : float\n Upper interface rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa]\n A_lower : float\n Lower plate interface rescaled nonlinear viscous rheology strength constant\n n_lower : float\n Lower plate interface power-law exponent\n\n Returns\n -------\n dstatedt : numpy.ndarray\n 1D array with the state derivative.\n \"\"\"\n # get number of variables within state\n # (depends on rheology, so is hardcoded here)\n n_vars_upper, n_vars_lower = 2, 2\n n_state_upper = n_vars_upper * n_creeping_upper\n n_state_lower = state.size - n_state_upper\n n_creeping_lower = n_state_lower // n_vars_lower\n n_creeping = n_creeping_lower + n_creeping_upper\n assert K_int.shape[0] == K_int.shape[1] == n_creeping\n assert K_ext.shape[0] == n_creeping\n # split up state\n state_upper = state[:n_state_upper]\n state_lower = state[n_state_upper:]\n # extract total velocities\n v_upper = v_0 * np.exp(state_upper[n_creeping_upper:])\n v_lower = state_lower[n_creeping_lower:]\n # get shear strain rate\n v = np.concatenate((v_upper, v_lower))\n dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1))\n dtaudt_upper = dtaudt[:n_creeping_upper]\n dtaudt_lower = dtaudt[n_creeping_upper:]\n # get individual rheologies' ODE\n dstatedt_upper = \\\n np.concatenate((v_upper, dzetadt_rdlog(dtaudt_upper,\n np.ones_like(v_lower) * alpha_h_upper)))\n dstatedt_lower = \\\n np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower,\n np.ones_like(v_lower) * A_lower,\n np.ones_like(v_upper) * n_lower)))\n # concatenate and return\n return np.concatenate((dstatedt_upper, dstatedt_lower))\n\n\n# simple rk4\n@njit(float64[:, :](float64, float64, float64[:], float64[:], int64, float64[:],\n float64[:, ::1], float64[:, ::1], float64, float64, float64, float64),\n cache=True)\ndef myrk4(ti, tf, state0, t_eval, 
n_creeping_upper, v_plate_vec,\n K_int, K_ext, A_upper, n_upper, A_lower, n_lower):\n h = t_eval[1] - t_eval[0]\n num_state = state0.size\n num_eval = t_eval.size\n sol = np.zeros((num_eval, num_state))\n sol[0, :] = state0\n for i in range(1, num_eval):\n cur = sol[i-1, :]\n k1 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,\n A_upper, n_upper, A_lower, n_lower)\n cur = sol[i-1, :] + (h / 2) * k1\n k2 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,\n A_upper, n_upper, A_lower, n_lower)\n cur = sol[i-1, :] + (h / 2) * k2\n k3 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,\n A_upper, n_upper, A_lower, n_lower)\n cur = sol[i-1, :] + h * k3\n k4 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext,\n A_upper, n_upper, A_lower, n_lower)\n sol[i, :] = sol[i-1, :] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)\n return sol\n\n\n@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],\n float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],\n float64[:], float64[:], float64[:], float64), cache=True)\ndef flat_run_plvis(t_eval, i_break, i_eq,\n n_creeping_upper, n_creeping_lower, K_int, K_ext,\n v_plate_vec, v_init, slip_taper, delta_tau_bounded,\n alpha_n_vec, n_vec, A_vec, mu_over_2vs):\n r\"\"\"\n Run the simulation.\n\n Parameters\n ----------\n t_eval : numpy.ndarray\n Evaluation times [s]\n i_break : numpy.ndarray\n Integer indices of cycle breaks [-]\n i_eq : numpy.ndarray\n Integer indices of earthquakes within sequence [-]\n n_creeping_upper : int\n Number [-] of creeping patches in the upper fault interface\n n_creeping_lower : int\n Number [-] of creeping patches in the lower fault interface\n K_int : numpy.ndarray\n Internal stress kernel [Pa/m]\n K_ext : numpy.ndarray\n External stress kernel [Pa/m]\n v_plate_vec : numpy.ndarray\n Plate velocity for all creeping patches [m/s]\n v_init : numpy.ndarray\n Initial velocity of the fault patches, in the dimensions of the rheology\n slip_taper : numpy.ndarray\n Compensating coseismic tapered slip on creeping patches [m]\n delta_tau_bounded : numpy.ndarray\n Bounded coseismic stress change [Pa]\n alpha_n_vec : numpy.ndarray\n Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]\n at each patch\n n_vec : float\n Upper plate interface power-law exponent [-] at each patch\n A_vec : float\n Rescaled upper plate interface nonlinear viscous rheology strength constant\n [Pa^n * s/m] at each patch\n mu_over_2vs : float\n Radiation damping factor :math:`\\mu / 2 v_s`, where :math:`\\mu` is the shear\n modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s]\n\n Returns\n -------\n full_state : numpy.ndarray\n Full state variable at the end of the integration.\n \"\"\"\n # initialize parameters\n n_vars_upper, n_vars_lower = 2, 2\n n_state_upper = n_vars_upper * n_creeping_upper\n n_state_lower = n_vars_lower * n_creeping_lower\n n_eval = t_eval.size\n n_slips = delta_tau_bounded.shape[1]\n\n # initialize arrays\n s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)\n s_minus_lower = np.zeros(n_creeping_lower)\n v_minus_upper = v_init[:n_creeping_upper]\n v_minus_lower = v_plate_vec[n_creeping_upper:]\n full_state = np.empty((n_state_upper + n_state_lower, n_eval))\n full_state[:] = np.NaN\n state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))\n\n # make flat ODE function arguments\n args = (v_plate_vec[0], 
K_int[:n_creeping_upper, :n_creeping_upper].copy(),\n K_ext[:n_creeping_upper, :], A_vec, n_vec, mu_over_2vs)\n\n # integrate\n spun_up = 0\n i_slip = 0\n steps = np.sort(np.concatenate((i_eq, i_break)))\n i = 0\n atol = np.ones(n_state_upper) * 1e-6\n atol[n_creeping_upper:] = 1e-15\n while i < steps.size - 1:\n # print(f\"{i+1}/{steps.size - 1}\")\n # get indices\n ji, jf = steps[i], steps[i+1]\n ti, tf = t_eval[ji], t_eval[jf]\n # call integrator\n with objmode(sol=\"float64[:, :]\", success=\"boolean\"):\n sol = solve_ivp(flat_ode_plvis,\n t_span=[ti, tf],\n y0=state_plus[:n_state_upper],\n t_eval=t_eval[ji:jf + 1],\n method=\"LSODA\", rtol=1e-6, atol=atol, args=args)\n success = sol.success\n if success:\n sol = sol.y\n else:\n sol = np.empty((1, 1))\n if not success:\n raise RuntimeError(\"Integrator failed.\")\n # save state to output array\n full_state[:n_state_upper, ji:jf + 1] = sol\n # fill in the imposed lower state\n full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \\\n np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \\\n * np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1))\n full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \\\n np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1))\n # can already stop here if this is the last interval\n if i == steps.size - 2:\n break\n # at the end of a full cycle, check the early stopping criteria\n if (not spun_up) and (i > n_slips) and (jf in i_break):\n old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]\n new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]\n old_state_upper = old_full_state[:n_state_upper, :]\n new_state_upper = new_full_state[:n_state_upper, :]\n old_v_upper = old_state_upper[-n_creeping_upper:, -1]\n new_v_upper = new_state_upper[-n_creeping_upper:, -1]\n lhs_upper = np.abs(old_v_upper - new_v_upper)\n rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper)\n stop_now = np.all(lhs_upper <= rhs_upper)\n if stop_now:\n spun_up = jf\n # advance i to the last cycle (don't forget the general advance later)\n i = steps.size - n_slips - 3\n elif spun_up and (jf in i_break):\n break\n # apply step change only if there is one\n if (jf in i_eq):\n state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]\n s_minus_upper = state_upper[:-n_creeping_upper]\n v_minus_upper = state_upper[-n_creeping_upper:]\n s_minus_lower = state_lower[:-n_creeping_lower]\n v_minus_lower = state_lower[-n_creeping_lower:]\n s_plus_upper = s_minus_upper.ravel().copy()\n s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]\n s_plus_lower = s_minus_lower.ravel()\n v_plus_upper = get_new_vel_plvis(v_minus_upper,\n delta_tau_bounded[:n_creeping_upper, i_slip],\n alpha_n_vec, n_vec, A_vec)\n v_plus_lower = v_minus_lower.ravel()\n state_plus = np.concatenate((s_plus_upper, v_plus_upper,\n s_plus_lower, v_plus_lower))\n i_slip = (i_slip + 1) % n_slips\n else:\n state_plus = sol[:, -1]\n # advance\n i += 1\n\n # warn if we never spun up\n if not spun_up:\n print(f\"Simulation did not spin up after {len(i_break) - 1} cycles!\")\n\n # done\n return full_state\n\n\n@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],\n float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],\n float64, float64[:], float64), cache=True)\ndef flat_run_rdlog(t_eval, i_break, i_eq,\n n_creeping_upper, n_creeping_lower, K_int, K_ext,\n v_plate_vec, v_init, slip_taper, delta_tau_bounded,\n v_0, 
alpha_h_vec, mu_over_2vs):\n r\"\"\"\n Run the simulation.\n\n Parameters\n ----------\n t_eval : numpy.ndarray\n Evaluation times [s]\n i_break : numpy.ndarray\n Integer indices of cycle breaks [-]\n i_eq : numpy.ndarray\n Integer indices of earthquakes within sequence [-]\n n_creeping_upper : int\n Number [-] of creeping patches in the upper fault interface\n n_creeping_lower : int\n Number [-] of creeping patches in the lower fault interface\n K_int : numpy.ndarray\n Internal stress kernel [Pa/m]\n K_ext : numpy.ndarray\n External stress kernel [Pa/m]\n v_plate_vec : numpy.ndarray\n Plate velocity for all creeping patches [m/s]\n v_init : numpy.ndarray\n Initial velocity of the fault patches, in the dimensions of the rheology\n slip_taper : numpy.ndarray\n Compensating coseismic tapered slip on creeping patches [m]\n delta_tau_bounded : numpy.ndarray\n Bounded coseismic stress change [Pa]\n v_0 : float\n Reference velocity [m/s]\n alpha_h_vec : numpy.ndarray\n Upper interface rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa]\n mu_over_2vs : float\n Radiation damping factor :math:`\\mu / 2 v_s`, where :math:`\\mu` is the shear\n modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s]\n\n Returns\n -------\n full_state : numpy.ndarray\n Full state variable at the end of the integration.\n \"\"\"\n # initialize parameters\n n_vars_upper, n_vars_lower = 2, 2\n n_state_upper = n_vars_upper * n_creeping_upper\n n_state_lower = n_vars_lower * n_creeping_lower\n n_eval = t_eval.size\n n_slips = delta_tau_bounded.shape[1]\n\n # initialize arrays\n s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)\n s_minus_lower = np.zeros(n_creeping_lower)\n assert np.all(v_init[:n_creeping_upper] > 0)\n zeta_minus_upper = np.log(v_init[:n_creeping_upper] / v_0)\n v_minus_lower = v_plate_vec[n_creeping_upper:]\n full_state = np.empty((n_state_upper + n_state_lower, n_eval))\n full_state[:] = np.NaN\n state_plus = np.concatenate((s_minus_upper, zeta_minus_upper,\n s_minus_lower, v_minus_lower))\n\n # make flat ODE function arguments\n args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(),\n K_ext[:n_creeping_upper, :], v_0, alpha_h_vec, mu_over_2vs)\n\n # integrate\n spun_up = 0\n i_slip = 0\n steps = np.sort(np.concatenate((i_eq, i_break)))\n i = 0\n while i < steps.size - 1:\n # print(f\"{i+1}/{steps.size - 1}\")\n # get indices\n ji, jf = steps[i], steps[i+1]\n ti, tf = t_eval[ji], t_eval[jf]\n # call integrator\n with objmode(sol=\"float64[:, :]\", success=\"boolean\"):\n sol = solve_ivp(flat_ode_rdlog,\n t_span=[ti, tf],\n y0=state_plus[:n_state_upper],\n t_eval=t_eval[ji:jf + 1],\n method=\"LSODA\", args=args)\n success = sol.success\n if success:\n sol = sol.y\n else:\n sol = np.empty((1, 1))\n if not success:\n raise RuntimeError(\"Integrator failed.\")\n # save state to output array\n full_state[:n_state_upper, ji:jf + 1] = sol\n # fill in the imposed lower state\n full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \\\n np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \\\n * np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1))\n full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \\\n np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1))\n # can already stop here if this is the last interval\n if i == steps.size - 2:\n break\n # at the end of a full cycle, check the early stopping criteria\n if (not spun_up) and (i > n_slips) and (jf in i_break):\n old_full_state = full_state[:, 
steps[i-2*n_slips-1]:steps[i-n_slips]]\n new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]\n old_state_upper = old_full_state[:n_state_upper, :]\n new_state_upper = new_full_state[:n_state_upper, :]\n old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1])\n new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1])\n lhs_upper = np.abs(old_v_upper - new_v_upper)\n rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper)\n stop_now = np.all(lhs_upper <= rhs_upper)\n if stop_now:\n spun_up = jf\n # advance i to the last cycle (don't forget the general advance later)\n i = steps.size - n_slips - 3\n elif spun_up and (jf in i_break):\n break\n # apply step change only if there is one\n if (jf in i_eq):\n state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]\n s_minus_upper = state_upper[:-n_creeping_upper]\n zeta_minus_upper = state_upper[-n_creeping_upper:]\n s_minus_lower = state_lower[:-n_creeping_lower]\n v_minus_lower = state_lower[-n_creeping_lower:]\n s_plus_upper = s_minus_upper.ravel().copy()\n s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]\n s_plus_lower = s_minus_lower.ravel()\n zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper,\n delta_tau_bounded[:n_creeping_upper, i_slip],\n alpha_h_vec)\n v_plus_lower = v_minus_lower.ravel()\n state_plus = np.concatenate((s_plus_upper, zeta_plus_upper,\n s_plus_lower, v_plus_lower))\n i_slip = (i_slip + 1) % n_slips\n else:\n state_plus = sol[:, -1]\n # advance\n i += 1\n\n # warn if we never spun up\n if not spun_up:\n print(f\"Simulation did not spin up after {len(i_break) - 1} cycles!\")\n\n full_state[n_creeping_upper:n_state_upper, :] = \\\n v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :])\n\n # done\n return full_state\n\n\n@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],\n float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],\n float64, float64, float64, float64, boolean), cache=True)\ndef flat_run_plvis_plvis(t_eval, i_break, i_eq,\n n_creeping_upper, n_creeping_lower, K_int, K_ext,\n v_plate_vec, v_init, slip_taper, delta_tau_bounded,\n alpha_n_upper, n_upper, alpha_n_lower, n_lower,\n simple_rk4):\n \"\"\"\n Run the simulation.\n\n Parameters\n ----------\n t_eval : numpy.ndarray\n Evaluation times [s]\n i_break : numpy.ndarray\n Integer indices of cycle breaks [-]\n i_eq : numpy.ndarray\n Integer indices of earthquakes within sequence [-]\n n_creeping_upper : int\n Number [-] of creeping patches in the upper fault interface\n n_creeping_lower : int\n Number [-] of creeping patches in the lower fault interface\n K_int : numpy.ndarray\n Internal stress kernel [Pa/m]\n K_ext : numpy.ndarray\n External stress kernel [Pa/m]\n v_plate_vec : numpy.ndarray\n Plate velocity for all creeping patches [m/s]\n v_init : numpy.ndarray\n Initial velocity of the fault patches, in the dimensions of the rheology\n slip_taper : numpy.ndarray\n Compensating coseismic tapered slip on creeping patches [m]\n delta_tau_bounded : numpy.ndarray\n Bounded coseismic stress change [Pa]\n alpha_n_upper : float\n Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]\n n_upper : float\n Upper plate interface power-law exponent [-]\n alpha_n_lower : float\n Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]\n n_lower : float\n Lower plate interface power-law exponent [-]\n simple_rk4 : bool\n Decide whether to use the simple RK4 integrator or not\n\n Returns\n 
-------\n full_state : numpy.ndarray\n Full state variable at the end of the integration.\n \"\"\"\n # initialize parameters\n n_vars_upper, n_vars_lower = 2, 2\n n_state_upper = n_vars_upper * n_creeping_upper\n n_state_lower = n_vars_lower * n_creeping_lower\n A_upper = alpha_n_upper ** (1 / n_upper)\n A_lower = alpha_n_lower ** (1 / n_lower)\n n_eval = t_eval.size\n n_slips = delta_tau_bounded.shape[1]\n\n # initialize arrays\n s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)\n s_minus_lower = np.zeros(n_creeping_lower)\n v_minus_upper = v_init[:n_creeping_upper]\n # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):\n # v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper)\n v_minus_lower = v_init[n_creeping_upper:]\n full_state = np.empty((n_state_upper + n_state_lower, n_eval))\n full_state[:] = np.NaN\n state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))\n\n # make flat ODE function arguments\n args = (n_creeping_upper, v_plate_vec, K_int, K_ext,\n A_upper, n_upper, A_lower, n_lower)\n\n # integrate\n spun_up = 0\n i_slip = 0\n steps = np.sort(np.concatenate((i_eq, i_break)))\n i = 0\n while i < steps.size - 1:\n # get indices\n ji, jf = steps[i], steps[i+1]\n ti, tf = t_eval[ji], t_eval[jf]\n # call integrator\n if simple_rk4:\n sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T\n else:\n with objmode(sol=\"float64[:, :]\", success=\"boolean\"):\n sol = solve_ivp(flat_ode_plvis_plvis,\n t_span=[ti, tf],\n y0=state_plus,\n t_eval=t_eval[ji:jf + 1],\n method=\"RK45\", rtol=1e-9, atol=1e-12, args=args)\n success = sol.success\n sol = sol.y\n if not success:\n raise RuntimeError(\"Integrator failed.\")\n # save state to output array\n full_state[:, ji:jf + 1] = sol\n # can already stop here if this is the last interval\n if i == steps.size - 2:\n break\n # at the end of a full cycle, check the early stopping criteria\n if (not spun_up) and (i > n_slips) and (jf in i_break):\n old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]\n new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]\n old_state_upper = old_full_state[:n_state_upper, :]\n old_state_lower = old_full_state[n_state_upper:, :]\n new_state_upper = new_full_state[:n_state_upper, :]\n new_state_lower = new_full_state[n_state_upper:, :]\n old_v_upper = old_state_upper[-n_creeping_upper:, -1]\n old_v_lower = old_state_lower[-n_creeping_lower:, -1]\n new_v_upper = new_state_upper[-n_creeping_upper:, -1]\n new_v_lower = new_state_lower[-n_creeping_lower:, -1]\n # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):\n # old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper)\n # new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper)\n lhs_upper = np.abs(old_v_upper - new_v_upper)\n lhs_lower = np.abs(old_v_lower - new_v_lower)\n rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper)\n rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower)\n stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower)\n if stop_now:\n spun_up = jf\n # advance i to the last cycle (don't forget the general advance later)\n i = steps.size - n_slips - 3\n elif spun_up and (jf in i_break):\n break\n # apply step change only if there is one\n if (jf in i_eq):\n state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]\n s_minus_upper = state_upper[:-n_creeping_upper]\n v_minus_upper = state_upper[-n_creeping_upper:]\n s_minus_lower = 
state_lower[:-n_creeping_lower]\n v_minus_lower = state_lower[-n_creeping_lower:]\n s_plus_upper = s_minus_upper.ravel().copy()\n s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]\n s_plus_lower = s_minus_lower.ravel()\n v_plus_upper = get_new_vel_plvis(v_minus_upper,\n delta_tau_bounded[:n_creeping_upper, i_slip],\n np.ones(n_creeping_upper) * alpha_n_upper,\n np.ones(n_creeping_upper) * n_upper,\n np.ones(n_creeping_upper) * A_upper)\n v_plus_lower = get_new_vel_plvis(v_minus_lower,\n delta_tau_bounded[n_creeping_upper:, i_slip],\n np.ones(n_creeping_upper) * alpha_n_lower,\n np.ones(n_creeping_upper) * n_lower,\n np.ones(n_creeping_upper) * A_lower)\n state_plus = np.concatenate((s_plus_upper, v_plus_upper,\n s_plus_lower, v_plus_lower))\n i_slip = (i_slip + 1) % n_slips\n else:\n state_plus = sol[:, -1]\n # advance\n i += 1\n\n # warn if we never spun up\n if not spun_up:\n print(f\"Simulation did not spin up after {len(i_break) - 1} cycles!\")\n\n # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):\n # vel_upper = self.fault.upper_rheo.zeta2v(vel_upper)\n\n # done\n return full_state\n\n\n@njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1],\n float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1],\n float64, float64, float64, float64, boolean), cache=True)\ndef flat_run_rdlog_plvis(t_eval, i_break, i_eq,\n n_creeping_upper, n_creeping_lower, K_int, K_ext,\n v_plate_vec, v_init, slip_taper, delta_tau_bounded,\n v_0, alpha_h_upper, alpha_n_lower, n_lower,\n simple_rk4):\n r\"\"\"\n Run the simulation.\n\n Parameters\n ----------\n t_eval : numpy.ndarray\n Evaluation times [s]\n i_break : numpy.ndarray\n Integer indices of cycle breaks [-]\n i_eq : numpy.ndarray\n Integer indices of earthquakes within sequence [-]\n n_creeping_upper : int\n Number [-] of creeping patches in the upper fault interface\n n_creeping_lower : int\n Number [-] of creeping patches in the lower fault interface\n K_int : numpy.ndarray\n Internal stress kernel [Pa/m]\n K_ext : numpy.ndarray\n External stress kernel [Pa/m]\n v_plate_vec : numpy.ndarray\n Plate velocity for all creeping patches [m/s]\n v_init : numpy.ndarray\n Initial velocity of the fault patches, in the dimensions of the rheology\n slip_taper : numpy.ndarray\n Compensating coseismic tapered slip on creeping patches [m]\n delta_tau_bounded : numpy.ndarray\n Bounded coseismic stress change [Pa]\n v_0 : float\n Reference velocity [m/s]\n alpha_h_upper : float\n Upper interface rate-and-state parameter :math:`(a - b) * \\sigma_E` [Pa]\n alpha_n_lower : float\n Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m]\n n_lower : float\n Lower plate interface power-law exponent [-]\n simple_rk4 : bool\n Decide whether to use the simple RK4 integrator or not\n\n Returns\n -------\n full_state : numpy.ndarray\n Full state variable at the end of the integration.\n \"\"\"\n # initialize parameters\n n_vars_upper, n_vars_lower = 2, 2\n n_state_upper = n_vars_upper * n_creeping_upper\n n_state_lower = n_vars_lower * n_creeping_lower\n A_lower = alpha_n_lower ** (1 / n_lower)\n n_eval = t_eval.size\n n_slips = delta_tau_bounded.shape[1]\n\n # initialize arrays\n s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper)\n s_minus_lower = np.zeros(n_creeping_lower)\n assert np.all(v_init[:n_creeping_upper] > 0)\n v_minus_upper = np.log(v_init[:n_creeping_upper] / v_0)\n # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):\n # 
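# --- illustrative aside -------------------------------------------------------
# Sketch of the coseismic velocity update for the nonlinear (power-law) viscous
# patches, assuming the steady-state relation tau = sign(v) * A * |v|**(1/n)
# with A = alpha_n**(1/n) (the same A used above); the sign convention is an
# assumption.  This illustrates the idea behind get_new_vel_plvis, not its
# actual implementation.
import numpy as np

def vel_after_stress_step(v_minus, delta_tau, alpha_n, n):
    A = alpha_n ** (1.0 / n)
    tau_minus = np.sign(v_minus) * A * np.abs(v_minus) ** (1.0 / n)
    tau_plus = tau_minus + delta_tau
    return np.sign(tau_plus) * (np.abs(tau_plus) / A) ** n

v_minus = np.array([1e-9, -1e-9])       # upper patch creeps forward, lower backward [m/s]
delta_tau = np.array([2e4, -2e4])       # coseismic stress change [Pa]
print(vel_after_stress_step(v_minus, delta_tau, alpha_n=1e17, n=3.0))
# ------------------------------------------------------------------------------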
v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper)\n v_minus_lower = v_init[n_creeping_upper:]\n full_state = np.empty((n_state_upper + n_state_lower, n_eval))\n full_state[:] = np.NaN\n state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower))\n\n # make flat ODE function arguments\n args = (n_creeping_upper, v_plate_vec, K_int, K_ext,\n v_0, alpha_h_upper, A_lower, n_lower)\n\n # integrate\n spun_up = 0\n i_slip = 0\n steps = np.sort(np.concatenate((i_eq, i_break)))\n i = 0\n while i < steps.size - 1:\n # get indices\n ji, jf = steps[i], steps[i+1]\n ti, tf = t_eval[ji], t_eval[jf]\n # call integrator\n if simple_rk4:\n sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T\n else:\n with objmode(sol=\"float64[:, :]\", success=\"boolean\"):\n sol = solve_ivp(flat_ode_rdlog_plvis,\n t_span=[ti, tf],\n y0=state_plus,\n t_eval=t_eval[ji:jf + 1],\n method=\"RK45\", rtol=1e-9, atol=1e-12, args=args)\n success = sol.success\n sol = sol.y\n if not success:\n raise RuntimeError(\"Integrator failed.\")\n # save state to output array\n full_state[:, ji:jf + 1] = sol\n # can already stop here if this is the last interval\n if i == steps.size - 2:\n break\n # at the end of a full cycle, check the early stopping criteria\n if (not spun_up) and (i > n_slips) and (jf in i_break):\n old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]]\n new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]]\n old_state_upper = old_full_state[:n_state_upper, :]\n old_state_lower = old_full_state[n_state_upper:, :]\n new_state_upper = new_full_state[:n_state_upper, :]\n new_state_lower = new_full_state[n_state_upper:, :]\n old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1])\n old_v_lower = old_state_lower[-n_creeping_lower:, -1]\n new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1])\n new_v_lower = new_state_lower[-n_creeping_lower:, -1]\n # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):\n # old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper)\n # new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper)\n lhs_upper = np.abs(old_v_upper - new_v_upper)\n lhs_lower = np.abs(old_v_lower - new_v_lower)\n rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper)\n rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower)\n stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower)\n if stop_now:\n spun_up = jf\n # advance i to the last cycle (don't forget the general advance later)\n i = steps.size - n_slips - 3\n elif spun_up and (jf in i_break):\n break\n # apply step change only if there is one\n if (jf in i_eq):\n state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1]\n s_minus_upper = state_upper[:-n_creeping_upper]\n zeta_minus_upper = state_upper[-n_creeping_upper:]\n s_minus_lower = state_lower[:-n_creeping_lower]\n v_minus_lower = state_lower[-n_creeping_lower:]\n s_plus_upper = s_minus_upper.ravel().copy()\n s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip]\n s_plus_lower = s_minus_lower.ravel()\n zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper,\n delta_tau_bounded[:n_creeping_upper, i_slip],\n np.ones(n_creeping_upper) * alpha_h_upper)\n v_plus_lower = get_new_vel_plvis(v_minus_lower,\n delta_tau_bounded[n_creeping_upper:, i_slip],\n np.ones(n_creeping_upper) * alpha_n_lower,\n np.ones(n_creeping_upper) * n_lower,\n np.ones(n_creeping_upper) * A_lower)\n state_plus = np.concatenate((s_plus_upper, 
zeta_plus_upper,\n s_plus_lower, v_plus_lower))\n i_slip = (i_slip + 1) % n_slips\n else:\n state_plus = sol[:, -1]\n # advance\n i += 1\n\n # warn if we never spun up\n if not spun_up:\n print(f\"Simulation did not spin up after {len(i_break) - 1} cycles!\")\n\n full_state[n_creeping_upper:n_state_upper, :] = \\\n v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :])\n # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic):\n # vel_upper = self.fault.upper_rheo.zeta2v(vel_upper)\n\n # done\n return full_state\n\n\n@njit(float64[:, :](float64[:, ::1], int64, int64, float64[:, ::1], float64[:, ::1]),\n cache=True)\n# optional(float64[:, ::1]), optional(float64[:, ::1])))\ndef get_surface_displacements_plvis_plvis(full_state, n_creeping_upper, n_creeping_lower,\n G_surf, deep_creep_slip): # , locked_slip):\n \"\"\"\n Calculate the surface displacements given the output of ``run``.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n Full state variable at the end of the integration.\n n_creeping_upper : int\n Number [-] of creeping patches in the upper fault interface\n n_creeping_lower : int\n Number [-] of creeping patches in the lower fault interface\n G_surf : numpy.ndarray\n Surface displacements Green's matrix [-] (dimensions must whether `locked_slip`\n and/or `deep_creep_slip` are passed to function)\n deep_creep_slip : numpy.ndarray\n Timeseries of slip [m] on the deep creep patches\n locked_slip : numpy.ndarray, optional\n Timeseries of slip [m] on the locked patches\n\n Returns\n -------\n surf_disp : numpy.ndarray\n Surface displacement timeseries.\n \"\"\"\n # extract timeseries from solution\n slip_upper = full_state[:n_creeping_upper, :]\n slip_lower = full_state[2 * n_creeping_upper:2 * n_creeping_upper + n_creeping_lower, :]\n # add the locked and deep patches to the combined upper & lower slip history matrix\n slips_all = np.concatenate((slip_upper, slip_lower), axis=0)\n # if locked_slip is not None:s\n # slips_all = np.concatenate((locked_slip[:, :slip_upper.shape[1]], slips_all),\n # axis=0)\n # if deep_creep_slip is not None:\n slips_all = np.concatenate((slips_all, deep_creep_slip), axis=0)\n # calculate all surface displacements for last full cycle\n surf_disps = G_surf @ slips_all\n return surf_disps\n\n\nclass Fault2D():\n \"\"\"\n Base class for the subduction fault mesh.\n \"\"\"\n\n def __init__(self, theta, D_lock, H, nu, E, v_s, halflen,\n upper_rheo, n_upper, lower_rheo, n_lower_left,\n n_lower_right, halflen_factor_lower,\n D_max=None, x1_pretrench=None):\n \"\"\"\n Define the fault mesh of the subduction zone fault system, based on the\n Elastic Subducting Plate Model (ESPM) of [kanda2010]_.\n\n Parameters\n ----------\n theta : float\n Dip angle [rad] of the plate interface (positive).\n D_lock : float\n Locking depth [m] of the upper plate interface (positive).\n H : float\n Subducting plate thickness [m].\n nu : float\n Poisson's ratio [-] of the fault zone.\n E : float\n Young's modulus [Pa] of the fault zone.\n v_s : float\n Shear wave velocity [m/s] in the fault zone.\n halflen : float\n Fault patch half-length [m], used for all locked patches.\n If ``D_max`` and ``x1_pretrench`` are not set, this length is also used for all\n creeping patches, otherwise, this is their minimum half-length.\n upper_rheo : Rheology\n Upper plate interface's rheology.\n n_upper : int\n Number [-] of patches on upper plate interface.\n lower_rheo : Rheology\n Lower plate interface's rheology. 
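# --- illustrative aside -------------------------------------------------------
# Tiny dimensional sketch of the surface-displacement step in
# get_surface_displacements_plvis_plvis: slip histories from the upper
# creeping, lower creeping, and deep-creep patches are stacked row-wise and
# mapped to the surface with a single Green's matrix product (all values below
# are random stand-ins; 2 * n_stations rows = horizontal and vertical).
import numpy as np

n_up, n_low, n_deep, n_t, n_sta = 4, 3, 3, 5, 6
rng = np.random.default_rng(0)
slip_upper = rng.normal(size=(n_up, n_t))       # [m]
slip_lower = rng.normal(size=(n_low, n_t))      # [m]
deep_creep = rng.normal(size=(n_deep, n_t))     # [m]
slips_all = np.concatenate((slip_upper, slip_lower, deep_creep), axis=0)
G_surf = rng.normal(size=(2 * n_sta, n_up + n_low + n_deep))
surf_disps = G_surf @ slips_all                 # (2 * n_stations, n_t) displacement series
# ------------------------------------------------------------------------------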
Pass ``None`` if it should not be simulated,\n but enforced to have the plate velocity.\n n_lower_left : int\n Number [-] of patches on lower plate interface (left of the bend).\n n_lower_right : int\n Number [-] of patches on lower plate interface (right of the bend).\n halflen_factor_lower : float\n Factor used to get a different minimum half-length of the patches on the lower\n plate interface.\n D_max : float, optional\n Maximum depth [m] of the upper plate interface (positive).\n If set, this makes the mesh use linearly-increasing patch sizes away from the\n locked zone. (``x1_pretrench`` must be set as well.)\n x1_pretrench : float, optional\n Horizontal distance [m] of the lower plate interface before the trench (positive).\n If set, this makes the mesh use linearly-increasing patch sizes away from the\n locked zone. (``D_max`` must be set as well.)\n\n References\n ----------\n\n .. [kanda2010] Kanda, R. V. S., & Simons, M. (2010).\n *An elastic plate model for interseismic deformation in subduction zones.*\n Journal of Geophysical Research: Solid Earth, 115(B3).\n doi:`10.1029/2009JB006611 `_.\n \"\"\"\n # initialize\n self.theta = float(theta)\n \"\"\" Subducting plate dip angle [rad] \"\"\"\n assert 0 < self.theta < np.pi / 2\n self.D_lock = float(D_lock)\n \"\"\" Theoretical locking depth [m] of the upper plate interface \"\"\"\n assert self.D_lock > 0\n self.H = float(H)\n \"\"\" Subducting plate thickness [m] \"\"\"\n assert self.H >= 0\n self.nu = float(nu)\n \"\"\" Poisson's ratio [-] of the fault zone \"\"\"\n self.E = float(E)\n \"\"\" Young's modulus [Pa] of the fault zone \"\"\"\n self.halflen = float(halflen)\n \"\"\" Fault patch half-length [m] on upper interface \"\"\"\n assert self.halflen > 0\n self.upper_rheo = upper_rheo\n \"\"\" Upper plate interface's rheology \"\"\"\n assert isinstance(self.upper_rheo, Rheology)\n self.n_upper = int(n_upper)\n \"\"\" Number [-] of patches on upper plate interface \"\"\"\n assert self.n_upper >= 1\n self.lower_rheo = lower_rheo\n \"\"\" Lower plate interface's rheology \"\"\"\n assert isinstance(self.lower_rheo, Rheology) or \\\n (self.lower_rheo is None)\n self.n_lower_left = int(n_lower_left)\n \"\"\" Number [-] of patches on lower plate interface (left of bend) \"\"\"\n assert self.n_lower_left >= 1\n self.n_lower_right = int(n_lower_right)\n \"\"\" Number [-] of patches on lower plate interface (right of bend) \"\"\"\n assert self.n_lower_right >= 1\n self.halflen_factor_lower = float(halflen_factor_lower)\n \"\"\" Prefactor [-] to change the lower interface half-length \"\"\"\n assert self.halflen_factor_lower >= 1\n self.lower_halflen = self.halflen * self.halflen_factor_lower\n \"\"\" Fault patch half-length [m] on lower interface \"\"\"\n if self.lower_rheo is not None:\n assert self.H >= 2 * self.lower_halflen, \"Plate too thin for given patch sizes.\"\n self.v_s = float(v_s)\n \"\"\" Shear wave velocity [m/s] in the fault zone \"\"\"\n self.mu_over_2vs = self.E / (2 * (1 + self.nu) * 2 * self.v_s)\n \"\"\" Radiation damping term [Pa * s/m] \"\"\"\n\n # switch between constant or linearly-varying patch sizes\n if (D_max is not None) and (x1_pretrench is not None):\n D_max = float(D_max)\n x1_pretrench = float(x1_pretrench)\n assert D_max > 0\n assert x1_pretrench > 0\n variable_mesh = True\n else:\n D_max = None\n x1_pretrench = None\n variable_mesh = False\n self.D_max = D_max\n \"\"\" Maximum depth [m] of the upper plate interface (optional) \"\"\"\n self.x1_pretrench = x1_pretrench\n \"\"\" Horizontal distance [m] 
of the lower plate interface before the trench (optional) \"\"\"\n self.variable_mesh = variable_mesh\n \"\"\" Flag whether the creeping patches are linearly-varying in size, or not \"\"\"\n\n # create mesh, centered about the x2 axis\n if self.variable_mesh:\n # project the locking depth onto dip angle\n L_lock = self.D_lock / np.sin(self.theta)\n # get number of locked and creeping patches on upper interface\n n_lock = int(L_lock // (2 * self.halflen))\n n_creep_up = self.n_upper - n_lock\n assert n_creep_up > 0, \"Current geometry yields no upper creeping patches.\"\n # project maximum interface depth onto dip angle\n L_max = self.D_max / np.sin(self.theta)\n # get length of creeping segment that needs to be linearly varying\n delta_L = L_max - n_lock * 2 * self.halflen\n # get linear half-length increase necessary given the number of patches\n # and length of creeping segment, on all three interface regions\n delta_h_upper = ((delta_L - 2 * self.halflen * n_creep_up) /\n (n_creep_up**2 - n_creep_up))\n delta_h_lower_right = \\\n ((L_max - 2 * self.lower_halflen * self.n_lower_right) /\n (self.n_lower_right**2 - self.n_lower_right))\n delta_h_lower_left = \\\n ((self.x1_pretrench - 2 * self.lower_halflen * self.n_lower_left) /\n (self.n_lower_left**2 - self.n_lower_left))\n # check that we're not running into numerical problems from starkly\n # increasing patch sizes\n if any([d > 0.2 for d in [delta_h_upper / self.halflen,\n delta_h_lower_right / self.lower_halflen,\n delta_h_lower_left / self.lower_halflen]]):\n raise ValueError(\"Half-length increase greater than 20%.\")\n # build vector of half-lengths\n halflen_vec = np.concatenate([\n np.ones(n_lock) * self.halflen,\n self.halflen + np.arange(n_creep_up) * delta_h_upper,\n (self.lower_halflen + np.arange(self.n_lower_left) * delta_h_lower_left)[::-1],\n self.lower_halflen + np.arange(self.n_lower_right) * delta_h_lower_right])\n else:\n # build half-length vector from constant size\n halflen_vec = np.ones(self.n_upper + self.n_lower_left + self.n_lower_right\n ) * self.halflen\n halflen_vec[self.n_upper:] *= self.halflen_factor_lower\n self.halflen_vec = halflen_vec\n \"\"\" Half-lengths [m] for each patch in the fault \"\"\"\n s = self.H * np.tan(self.theta / 2)\n R = np.array([[np.cos(-self.theta), -np.sin(-self.theta)],\n [np.sin(-self.theta), np.cos(-self.theta)]])\n # upper plate interface\n upper_right_x1 = np.concatenate([[0], np.cumsum(2*self.halflen_vec[:self.n_upper])])\n upper_right_x2 = np.zeros_like(upper_right_x1)\n upper_right = R @ np.stack([upper_right_x1, upper_right_x2], axis=0)\n # lower left plate interface\n temp = self.halflen_vec[self.n_upper + self.n_lower_left - 1:self.n_upper - 1:-1]\n lower_left_x1 = -s - np.concatenate([[0], np.cumsum(2*temp)])[::-1]\n lower_left_x2 = -self.H * np.ones(self.n_lower_left + 1)\n lower_left = np.stack([lower_left_x1, lower_left_x2], axis=0)\n # lower right\n lower_right_x1 = np.concatenate([\n [0], np.cumsum(2*self.halflen_vec[self.n_upper + self.n_lower_left:])])\n lower_right_x2 = np.zeros_like(lower_right_x1)\n lower_right = (R @ np.stack([lower_right_x1, lower_right_x2], axis=0)\n - np.array([[s], [self.H]]))\n # concatenate mesh parts\n self.end_upper = upper_right\n \"\"\" 2-element coordinates of upper fault patch endpoints [m] \"\"\"\n self.end_lower = np.concatenate([lower_left, lower_right[:, 1:]], axis=1)\n \"\"\" 2-element coordinates of lower fault patch endpoints [m] \"\"\"\n self.end = np.concatenate([self.end_upper, self.end_lower], axis=1)\n \"\"\" 
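# --- illustrative aside -------------------------------------------------------
# Quick check of the linear patch-size growth used for the variable mesh above:
# n patches with half-lengths halflen + k*delta_h (k = 0..n-1) have total
# length 2*halflen*n + delta_h*(n**2 - n), so solving for delta_h given a
# target segment length L reproduces the delta_h_* expressions in the code.
import numpy as np

halflen, n, L = 5e3, 20, 400e3                    # illustrative values [m], [-], [m]
delta_h = (L - 2 * halflen * n) / (n**2 - n)      # same form as delta_h_upper above
halflen_vec = halflen + np.arange(n) * delta_h
assert np.isclose(np.sum(2 * halflen_vec), L)     # the patches exactly tile the segment
# ------------------------------------------------------------------------------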
2-element coordinates of fault patch endpoints [m] \"\"\"\n self.mid = np.concatenate([upper_right[:, :-1] + upper_right[:, 1:],\n lower_left[:, :-1] + lower_left[:, 1:],\n lower_right[:, :-1] + lower_right[:, 1:]],\n axis=1) / 2\n \"\"\" 2-element coordinates of fault patch midpoints [m] \"\"\"\n self.mid_x1 = self.mid[0, :]\n \"\"\" :math:`x_1` coordinates of fault patch midpoints [m] \"\"\"\n self.mid_x2 = self.mid[1, :]\n \"\"\" :math:`x_2` coordinates of fault patch midpoints [m] \"\"\"\n # access subparts\n self.ix_upper = np.arange(self.mid_x1.size) < upper_right_x1.size\n \"\"\" Mask of upper fault interface patches \"\"\"\n self.ix_lower = ~self.ix_upper\n \"\"\" Mask of lower fault interface patches (if existing) \"\"\"\n # locked is the part that slips coseismically on the upper plate interface\n self.x1_lock = self.D_lock / np.tan(self.theta)\n \"\"\" Theoretical surface location [m] of end of locked interface \"\"\"\n ix_locked = self.mid_x1 <= self.x1_lock - self.halflen\n ix_locked[self.n_upper:] = False\n self.ix_locked = ix_locked\n \"\"\" Mask of fault patches that are locked interseismically \"\"\"\n self.n_locked = (self.ix_locked).sum()\n \"\"\" Number [-] of locked patches \"\"\"\n # assert self.n_locked == n_lock\n self.n_creeping = (~self.ix_locked).sum()\n \"\"\" Number [-] of creeping patches \"\"\"\n self.n_creeping_upper = (~self.ix_locked[:self.n_upper]).sum()\n \"\"\" Number [-] of creeping patches in the upper fault interface \"\"\"\n # assert self.n_creeping_upper == n_creep_up\n self.n_creeping_lower = self.n_creeping - self.n_creeping_upper\n \"\"\" Number [-] of creeping patches in the lower fault interface \"\"\"\n assert self.n_creeping_lower == n_lower_left + n_lower_right\n self.mid_x1_locked = self.mid_x1[self.ix_locked]\n \"\"\" :math:`x_1` coordinates of locked fault patch midpoints [m] \"\"\"\n self.mid_x2_locked = self.mid_x2[self.ix_locked]\n \"\"\" :math:`x_2` coordinates of locked fault patch midpoints [m] \"\"\"\n self.mid_x1_creeping = self.mid_x1[~self.ix_locked]\n \"\"\" :math:`x_1` coordinates of creeping fault patch midpoints [m] \"\"\"\n self.mid_x2_creeping = self.mid_x2[~self.ix_locked]\n \"\"\" :math:`x_2` coordinates of creeping fault patch midpoints [m] \"\"\"\n # for later calculations, need theta and unit vectors in vector form\n theta_vec = np.ones_like(self.mid_x1) * self.theta\n theta_vec[self.n_upper:self.n_upper + self.n_lower_left] = np.pi\n theta_vec[self.n_upper + self.n_lower_left:] += np.pi\n self.theta_vec = theta_vec\n \"\"\" Plate dip angle [rad] for all fault patches \"\"\"\n self.e_f = np.stack([np.sin(self.theta_vec), np.cos(self.theta_vec)], axis=0)\n \"\"\" Unit vectors [-] normal to fault patches\"\"\"\n self.e_s = np.stack([-np.cos(self.theta_vec), np.sin(self.theta_vec)], axis=0)\n \"\"\" Unit vectors [-] in fault patch slip direction \"\"\"\n # get external (from the locked to the creeping patches) stress kernel\n K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping,\n self.mid_x1_locked, self.mid_x2_locked,\n self.halflen_vec[self.ix_locked],\n self.theta_vec[self.ix_locked], self.nu, self.E\n )[:, :self.n_locked]\n Kx1x1 = K[:self.n_creeping, :]\n Kx2x2 = K[self.n_creeping:2*self.n_creeping, :]\n Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :]\n K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()]\n ).reshape(2, 2, self.n_creeping, self.n_locked).transpose(2, 3, 0, 1)\n self.K_ext = np.einsum(\"ki,ijkl,li->ij\", self.e_s[:, ~self.ix_locked],\n K, self.e_f[:, ~self.ix_locked], 
optimize=True)\n \"\"\" External stress kernel [Pa/m] \"\"\"\n # get internal (within creeping patches) stress kernel\n K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping,\n self.mid_x1_creeping, self.mid_x2_creeping,\n self.halflen_vec[~self.ix_locked],\n self.theta_vec[~self.ix_locked], self.nu, self.E\n )[:, :self.n_creeping]\n Kx1x1 = K[:self.n_creeping, :]\n Kx2x2 = K[self.n_creeping:2*self.n_creeping, :]\n Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :]\n K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()]\n ).reshape(2, 2, self.n_creeping, self.n_creeping).transpose(2, 3, 0, 1)\n self.K_int = np.einsum(\"ki,ijkl,li->ij\", self.e_s[:, ~self.ix_locked],\n K, self.e_f[:, ~self.ix_locked], optimize=True)\n \"\"\" Internal stress kernel [Pa/m] \"\"\"\n self.n_state_upper = self.upper_rheo.n_vars * self.n_creeping_upper\n \"\"\" Size [-] of upper plate interface state variable \"\"\"\n self.n_state_lower = (self.lower_rheo.n_vars * self.n_creeping_lower\n if self.lower_rheo is not None\n else 2 * self.n_creeping_lower)\n \"\"\" Size [-] of lower plate interface state variable \"\"\"\n if (self.n_creeping_upper == 0) or (self.n_creeping_lower == 0):\n raise ValueError(\"Defined geometry results in zero creeping patches in \"\n \"either the upper or lower plate interface.\")\n # # if upper rheology is Burgers, tell it our specific shear modulus\n # if isinstance(self.upper_rheo, rheologies.LinearBurgers):\n # self.upper_rheo.set_G(self.K_int[:self.n_creeping_upper, :self.n_creeping_upper])\n # discretized locking depth\n self.D_lock_disc = -self.end_upper[1, self.n_locked]\n \"\"\" Discretized locking depth [m] of the upper plate interface \"\"\"\n self.x1_lock_disc = self.D_lock_disc / np.tan(self.theta)\n \"\"\" Discretized surface location [m] of end of locked interface \"\"\"\n\n\nclass SubductionSimulation():\n \"\"\"\n Subduction simulation container class.\n \"\"\"\n\n def __init__(self, v_plate, n_cycles_max, n_samples_per_eq, delta_tau_max, v_max,\n fault, Ds_0, Ds_0_logsigma, T_rec, T_rec_logsigma, D_asp_min,\n D_asp_max, T_anchor, T_last, enforce_v_plate, largehalflen,\n t_obs, pts_surf):\n \"\"\"\n Create a subduction simulation.\n\n Parameters\n ----------\n v_plate : float\n Nominal far-field plate velocity, in the dimensions of the rheology\n n_cycles_max : int\n Maximum number of cycles to simulate [-]\n n_samples_per_eq : int\n Number of internal evaluation timesteps between earthquakes [-]\n delta_tau_max : float\n Maximum shear stress change [Pa] from coseismic slip on locked patches\n v_max : float\n Maximum slip velocity [m/s] on creeping patches\n fault : Fault2D\n Fault object\n Ds_0 : numpy.ndarray\n Nominal coseismic left-lateral shearing [m] of the locked fault patch(es)\n Ds_0_logsigma : numpy.ndarray\n Standard deviation of the fault slip in logarithmic space\n T_rec : numpy.ndarray\n Nominal recurrence time [a] for each earthquake\n T_rec_logsigma : numpy.ndarray\n Standard deviation of the recurrence time in logarithmic space\n D_asp_min : numpy.ndarray\n Minimum depth [m] for the asperities of each earthquake\n D_asp_max : numpy.ndarray\n Maximum depth [m] for the asperities of each earthquake\n T_anchor : str\n Anchor date where observations end\n T_last : list\n Dates of the last occurence for each earthquake (list of strings)\n enforce_v_plate : bool\n Flag whether to allow v_plate to vary or not\n largehalflen : float\n Fault patch half-length of the deep crreep patches [m]\n t_obs : numpy.ndarray, pandas.DatetimeIndex\n 
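# --- illustrative aside -------------------------------------------------------
# Illustration of the einsum projection used for K_ext and K_int above: for
# each receiver patch i and source patch j, the 2x2 stress tensor K[i, j] is
# projected onto the receiver's slip direction e_s[:, i] and fault-normal
# e_f[:, i], giving one scalar shear-traction kernel entry per pair.
import numpy as np

rng = np.random.default_rng(1)
n_rec, n_src = 3, 2
K = rng.normal(size=(n_rec, n_src, 2, 2))                 # per-pair stress tensors
theta = np.full(n_rec, np.deg2rad(15.0))                  # receiver dip angles [rad]
e_f = np.stack([np.sin(theta), np.cos(theta)], axis=0)    # fault-normal unit vectors
e_s = np.stack([-np.cos(theta), np.sin(theta)], axis=0)   # slip-direction unit vectors
K_proj = np.einsum("ki,ijkl,li->ij", e_s, K, e_f)
# element-by-element check against the explicit double projection
manual = np.array([[e_s[:, i] @ K[i, j] @ e_f[:, i] for j in range(n_src)]
                   for i in range(n_rec)])
assert np.allclose(K_proj, manual)
# ------------------------------------------------------------------------------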
Observation timesteps, either as decimal years relative to the cycle start,\n or as Timestamps\n pts_surf : numpy.ndarray\n Horizontal landward observation coordinates [m] relative to the trench\n \"\"\"\n\n # save general sequence & fault parameters\n self.v_plate = float(v_plate)\n \"\"\" Nominal far-field plate velocity, in the dimensions of the rheology \"\"\"\n self.n_cycles_max = int(n_cycles_max)\n \"\"\" Maximum number of cycles to simulate [-] \"\"\"\n self.n_samples_per_eq = int(n_samples_per_eq)\n \"\"\" Number of internal evaluation timesteps between earthquakes [-] \"\"\"\n self.delta_tau_max = float(delta_tau_max)\n \"\"\" Maximum shear stress change [Pa] from coseismic slip on locked patches \"\"\"\n self.v_max = float(v_max)\n \"\"\" Maximum slip velocity [m/s] on creeping patches \"\"\"\n\n # define fault\n assert isinstance(fault, Fault2D)\n if not (isinstance(fault.upper_rheo, NonlinearViscous) or\n isinstance(fault.upper_rheo, RateStateSteadyLogarithmic)) or \\\n not (isinstance(fault.lower_rheo, NonlinearViscous) or\n (fault.lower_rheo is None)):\n raise NotImplementedError(\"SubductionSimulation is only implemented for \"\n \"NonlinearViscous or RateStateSteadyLogarithmic \"\n \"rheologies in the upper interface, and NonlinearViscous \"\n \"rheology in the lower interface.\")\n self.fault = fault\n \"\"\" Fault object \"\"\"\n\n # cast earthquake slips as NumPy array\n self.Ds_0 = np.atleast_1d(Ds_0)\n \"\"\" Nominal coseismic left-lateral shearing [m] of the locked fault patch(es) \"\"\"\n self.Ds_0_logsigma = np.atleast_1d(Ds_0_logsigma)\n \"\"\" Standard deviation of the fault slip in logarithmic space \"\"\"\n # load recurrence times\n self.T_rec = np.atleast_1d(T_rec)\n \"\"\" Nominal recurrence time [a] for each earthquake \"\"\"\n self.T_rec_logsigma = np.atleast_1d(T_rec_logsigma)\n \"\"\" Standard deviation of the recurrence time in logarithmic space \"\"\"\n # load the minimum and maximum depths of the earthquakes\n self.D_asp_min = np.atleast_1d(D_asp_min)\n \"\"\" Minimum depth [m] for the asperities of each earthquake \"\"\"\n self.D_asp_max = np.atleast_1d(D_asp_max)\n \"\"\" Maximum depth [m] for the asperities of each earthquake \"\"\"\n assert all([D <= self.fault.D_lock for D in self.D_asp_max]), \\\n f\"Asperity depths {self.D_asp_max/1e3} km are deeper than the \" \\\n f\"locking depth {self.fault.D_lock/1e3}.\"\n self.T_anchor = str(T_anchor)\n \"\"\" Anchor date where observations end \"\"\"\n assert isinstance(T_last, list) and all([isinstance(tl, str) for tl in T_last])\n self.T_last = T_last\n \"\"\" Dates of the last occurence for each earthquake \"\"\"\n # create a NumPy array that for each locked asperity has the slip per earthquake\n self.slip_mask = np.logical_and(self.fault.mid_x2_locked.reshape(-1, 1)\n < -self.D_asp_min.reshape(1, -1),\n self.fault.mid_x2_locked.reshape(-1, 1)\n > -self.D_asp_max.reshape(1, -1))\n \"\"\" Mask that matches each earthquake to a fault patch \"\"\"\n self.T_fullcycle = np.lcm.reduce(self.T_rec)\n \"\"\" Nominal recurrence time [a] for an entire joint earthquake cycle \"\"\"\n self.n_eq = self.Ds_0.size\n \"\"\" Number of distinct earthquakes in sequence \"\"\"\n self.n_eq_per_asp = (self.T_fullcycle / self.T_rec).astype(int)\n \"\"\" Number of earthquakes per asperity and full cycle \"\"\"\n\n # create realization of the slip amount and earthquake timings\n rng = np.random.default_rng()\n # first, create realizations of occurence times\n # note that this will result in a varying plate velocity rate\n # 
(ignore zero-slip earthquakes)\n self.T_rec_per_asp = [rng.lognormal(np.log(t), s, n) for t, s, n in\n zip(self.T_rec, self.T_rec_logsigma, self.n_eq_per_asp)]\n \"\"\" Recurrence time [a] realization \"\"\"\n self.Ds_0_per_asp = [rng.lognormal(np.log(d), s, n) if d > 0\n else np.array([d] * n) for d, s, n in\n zip(self.Ds_0, self.Ds_0_logsigma, self.n_eq_per_asp)]\n \"\"\" Fault slip [m] realization \"\"\"\n\n # sanity check that in each asperity, the nominal plate rate is recovered\n self.slip_asperities = self.slip_mask.astype(int) * self.Ds_0.reshape(1, -1)\n \"\"\" Slip [m] for each earthquake in each asperity \"\"\"\n v_eff_in_asp = (self.slip_asperities / self.T_rec.reshape(1, -1)).sum(axis=1)\n assert np.allclose(v_eff_in_asp, self.v_plate * 86400 * 365.25), \\\n \"The nominal plate rate is not recovered in all asperities.\\n\" \\\n f\"Plate velocity = {self.v_plate * 86400 * 365.25}\\n\" \\\n f\"Effective velocity in each asperity:\\n{v_eff_in_asp}\"\n\n # second, we need to shift the random realization for each earthquake\n # individually such that they all yield the same v_plate (enforced or not)\n # get the effective recurrence time as implied by the T_rec realizations\n T_fullcycle_per_asp_eff = np.array([sum(t) for t in self.T_rec_per_asp])\n # same for the effective cumulative slip\n Ds_0_fullcycle_per_asp_eff = np.array([sum(d) for d in self.Ds_0_per_asp])\n # we need to scale each individual sequence such that it implies the same\n # recurrence time and cumulative slip in each asperity\n # (again ignoring zero-slip earthquakes)\n T_fullcycle_eff_mean = np.mean(T_fullcycle_per_asp_eff)\n Ds_0_fullcycle_mean = np.ma.masked_equal(Ds_0_fullcycle_per_asp_eff, 0).mean()\n T_rec_per_asp_adj = [np.array(self.T_rec_per_asp[i]) * T_fullcycle_eff_mean\n / T_fullcycle_per_asp_eff[i] for i in range(self.n_eq)]\n Ds_0_per_asp_adj = [np.array(self.Ds_0_per_asp[i]) * Ds_0_fullcycle_mean\n / Ds_0_fullcycle_per_asp_eff[i] if self.Ds_0[i] > 0\n else np.array(self.Ds_0_per_asp[i]) for i in range(self.n_eq)]\n # now each asperity has the same effective plate velocity, which can be different\n # from the nominal one - if we want to enforce the nominal plate velocity,\n # we can rescale the recurrence times again\n self.enforce_v_plate = bool(enforce_v_plate)\n \"\"\" Flag whether to allow v_plate to vary or not \"\"\"\n ix_nonzero_slip = np.argmax(self.Ds_0 > 0)\n v_plate_eff = (sum(Ds_0_per_asp_adj[ix_nonzero_slip])\n / sum(T_rec_per_asp_adj[ix_nonzero_slip]) / 86400 / 365.25)\n if self.enforce_v_plate:\n v_plate_factor = self.v_plate / v_plate_eff\n for i in range(self.n_eq):\n T_rec_per_asp_adj[i] /= v_plate_factor\n v_plate_eff = self.v_plate\n self.v_plate_eff = v_plate_eff\n \"\"\" Effective far-field plate velocity [m/s] \"\"\"\n self.T_eff = sum(T_rec_per_asp_adj[0])\n \"\"\" Effective length [a] of entire earthquake sequence \"\"\"\n\n # third, we need to create a list of earthquake dates and associated slips\n temp_slips = np.vstack([self.slip_mask[:, i].reshape(1, -1)\n * Ds_0_per_asp_adj[i].reshape(-1, 1)\n for i in range(self.n_eq)])\n year_offsets = [(pd.Period(self.T_anchor, \"D\") - pd.Period(self.T_last[i], \"D\")\n ).n / 365.25 for i in range(self.n_eq)]\n eq_df_index = np.concatenate(\n [self.T_eff -\n (np.cumsum(T_rec_per_asp_adj[i]) - T_rec_per_asp_adj[i] + year_offsets[i])\n for i in range(self.n_eq)])\n # round the dates to the closest day and combine earthquakes\n eq_df_index_rounded = np.around(eq_df_index * 365.25) / 365.25\n # build a DataFrame with exact and rounded 
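# --- illustrative aside -------------------------------------------------------
# Sketch of the recurrence-time randomization and rescaling above: draw
# lognormal recurrence times and slips about their nominal values, then rescale
# the sequence so its long-term slip rate matches the nominal plate velocity.
# This mirrors the enforce_v_plate branch for a single asperity; the numbers
# are illustrative.
import numpy as np

rng = np.random.default_rng(42)
T_rec, Ds_0, n_events = 200.0, 6.0, 10           # nominal recurrence [a] and slip [m]
T_real = rng.lognormal(np.log(T_rec), 0.1, n_events)
Ds_real = rng.lognormal(np.log(Ds_0), 0.1, n_events)
v_plate = Ds_0 / T_rec / (86400 * 365.25)        # nominal plate rate [m/s]
v_eff = Ds_real.sum() / T_real.sum() / (86400 * 365.25)
T_real /= v_plate / v_eff                        # same rescaling as the enforce_v_plate case
v_check = Ds_real.sum() / T_real.sum() / (86400 * 365.25)
assert np.isclose(v_check, v_plate)
# ------------------------------------------------------------------------------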
times\n eq_df = pd.DataFrame(data=temp_slips)\n eq_df[\"time\"] = eq_df_index\n eq_df[\"rounded\"] = eq_df_index_rounded\n # now aggregate by rounded time, keeping the minimum exact time, and summing slip\n agg_dict = {\"time\": \"min\"}\n agg_dict.update({c: \"sum\" for c in range(self.fault.n_locked)})\n eq_df = eq_df.groupby(\"rounded\").agg(agg_dict)\n # convert time column to index and sort\n eq_df.set_index(\"time\", inplace=True)\n eq_df.sort_index(inplace=True)\n assert np.allclose(eq_df.sum(axis=0), eq_df.sum(axis=0)[0])\n self.eq_df = eq_df\n \"\"\"\n DataFrame with the dates [decimal year from cycle start] and slips [m]\n for each asperity\n \"\"\"\n\n # fourth, we need to create a list of dates to use internally when evaluating\n # the earthquake cycle - this is independent of the observation dates\n i_frac_cumsum = np.concatenate([[self.eq_df.index[-1] - self.T_eff],\n self.eq_df.index.values])\n T_frac = np.diff(i_frac_cumsum)\n t_eval = np.concatenate(\n [np.logspace(0, np.log10(1 + T_frac[i]), self.n_samples_per_eq, endpoint=False)\n - 1 + i_frac_cumsum[i] + j*self.T_eff\n for j in range(self.n_cycles_max) for i, t in enumerate(T_frac)])\n num_neg = (t_eval < 0).sum()\n t_eval = np.roll(t_eval, -num_neg)\n t_eval[-num_neg:] += self.n_cycles_max * self.T_eff\n self.t_eval = np.sort(np.concatenate(\n [t_eval, np.arange(self.n_cycles_max + 1) * self.T_eff]))\n \"\"\" Internal evaluation timesteps [decimal years since cycle start] \"\"\"\n self.n_eval = self.t_eval.size\n \"\"\" Number of internal evaluation timesteps [-] \"\"\"\n\n # fifth, for the integration, we need the indices of the timesteps that mark either\n # an earthquake or the start of a new cycle\n self.n_slips = self.eq_df.shape[0]\n \"\"\" Number of slips in a sequence [-] \"\"\"\n self.ix_break = [i*(self.n_slips * self.n_samples_per_eq + 1)\n for i in range(self.n_cycles_max + 1)]\n \"\"\" Indices of breaks between cycles \"\"\"\n self.ix_eq = [self.ix_break[i] + j * self.n_samples_per_eq - num_neg + 1\n for i in range(self.n_cycles_max) for j in range(1, 1 + self.n_slips)]\n \"\"\" Indices of earthquakes \"\"\"\n\n # sixth and last, for the final loop, we need a joint timesteps array between internal\n # and external (observation) timestamps, such that we can debug, check early stopping,\n # and restrict the output to the requested timeseries\n if isinstance(t_obs, pd.DatetimeIndex):\n t_obs = self.T_eff + (t_obs - pd.Timestamp(self.T_anchor)\n ).total_seconds().values / 86400 / 365.25\n elif isinstance(t_obs, np.ndarray):\n if np.all(t_obs < 0):\n # this format is relative to T_anchor and more stable when T_eff varies\n t_obs = self.T_eff + t_obs\n assert np.all(t_obs >= 0) and np.all(t_obs < self.T_eff), \\\n f\"Range of 't_obs' ({t_obs.min()}-{t_obs.max():} years) outside of \" \\\n f\"the earthquake cycle period ({self.T_eff:} years).\"\n else:\n raise ValueError(\"Unknown 't_obs' data type.\")\n self.t_obs = t_obs\n \"\"\" Observation timesteps [decimal years since cycle start] \"\"\"\n # combine all possible timesteps\n t_obs_shifted = self.t_obs + (self.n_cycles_max - 1) * self.T_eff\n self.t_eval_joint = np.unique(np.concatenate((self.t_eval, t_obs_shifted)))\n \"\"\"\n Joint internal evaluation and external observation timesteps\n [decimal years since cycle start]\n \"\"\"\n # get indices of each individual subset in the new timesteps array\n self.ix_break_joint = \\\n np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_break]))\n \"\"\" Indices of breaks between cycles in joint timesteps 
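# --- illustrative aside -------------------------------------------------------
# The internal evaluation grid above is logarithmically densified after each
# earthquake: np.logspace(0, log10(1 + T), N, endpoint=False) - 1 spans [0, T)
# with samples clustered right after the event, where the postseismic
# velocities change fastest.  Small standalone illustration:
import numpy as np

T_frac = 120.0                      # time to the next event [a] (illustrative)
n_samples = 8
t_rel = np.logspace(0, np.log10(1 + T_frac), n_samples, endpoint=False) - 1
print(t_rel)                        # starts at 0.0, spacing grows toward T_frac
print(np.diff(t_rel))               # strictly increasing intervals
# ------------------------------------------------------------------------------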
\"\"\"\n self.ix_eq_joint = \\\n np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_eq]))\n \"\"\" Indices of earthquakes in joint timesteps \"\"\"\n self.ix_obs_joint = \\\n np.flatnonzero(np.isin(self.t_eval_joint, t_obs_shifted))\n \"\"\" Indices of observation timestamps in joint timesteps \"\"\"\n\n # get vectors of upper plate rheology parameters\n if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n # alpha_h\n self.alpha_h_vec = \\\n self.fault.upper_rheo.get_param_vectors(\n -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper])\n r\"\"\" Depth-variable :math:`(a - b) * \\sigma_E` [Pa] of upper plate interface \"\"\"\n elif isinstance(self.fault.upper_rheo, NonlinearViscous):\n # A, alpha_n, and n\n alpha_n_vec, n_vec, A_vec = \\\n self.fault.upper_rheo.get_param_vectors(\n -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper], self.v_plate)\n self.alpha_n_vec = alpha_n_vec\n r\"\"\" Depth-variable :math:`\\alpha_n` [Pa^n * s/m] of upper plate interface \"\"\"\n self.n_vec = n_vec\n r\"\"\" Depth-variable :math:`n` [-] of upper plate interface \"\"\"\n self.A_vec = A_vec\n r\"\"\" Depth-variable :math:`A ` [Pa * (s/m)^(1/n)] of upper plate interface \"\"\"\n else:\n raise NotImplementedError\n\n # get unbounded delta_tau\n self.delta_tau_unbounded = self.fault.K_ext @ self.eq_df.values.T\n \"\"\" Unbounded coseismic stress change [Pa] \"\"\"\n # get pseudoinverse of K_int for tapered slip\n self.K_int_inv_upper = np.linalg.pinv(\n self.fault.K_int[:self.fault.n_creeping_upper, :self.fault.n_creeping_upper])\n \"\"\" Inverse of K_int [m/Pa] \"\"\"\n self.delta_tau_max_from_v_max_lower = \\\n ((self.fault.lower_rheo.alpha_n * self.v_max)**(1 / self.fault.lower_rheo.n) -\n (self.fault.lower_rheo.alpha_n * self.v_plate)**(1 / self.fault.lower_rheo.n)\n if self.fault.lower_rheo is not None else np.inf)\n \"\"\" Maximum shear stress change [Pa] in lower plate from capped velocity \"\"\"\n if isinstance(self.fault.upper_rheo, NonlinearViscous):\n delta_tau_max_from_v_max_upper = \\\n (self.alpha_n_vec * self.v_max)**(1 / self.n_vec) - \\\n (self.alpha_n_vec * self.v_plate)**(1 / self.n_vec)\n elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n delta_tau_max_from_v_max_upper = self.alpha_h_vec * \\\n (np.log(self.v_max / self.fault.upper_rheo.v_0) -\n np.log(self.v_plate / self.fault.upper_rheo.v_0))\n self.delta_tau_max_from_v_max_upper = delta_tau_max_from_v_max_upper\n \"\"\" Maximum shear stress change [Pa] in upper plate from capped velocity \"\"\"\n self.delta_tau_max_joint_upper = np.fmin(self.delta_tau_max,\n self.delta_tau_max_from_v_max_upper)\n \"\"\" Joint maximum shear stress change [Pa] allowed in upper plate \"\"\"\n self.delta_tau_max_joint_lower = \\\n (min(self.delta_tau_max, self.delta_tau_max_from_v_max_lower)\n if self.fault.lower_rheo is not None else np.inf)\n \"\"\" Joint maximum shear stress change [Pa] allowed in lower plate \"\"\"\n # create tapered slip by making delta_tau linearly increase until delta_tau_max\n delta_tau_bounded = self.delta_tau_unbounded.copy()\n delta_tau_bounded[:self.fault.n_creeping_upper, :] = \\\n np.fmin(self.delta_tau_max_joint_upper.reshape(-1, 1),\n self.delta_tau_unbounded[:self.fault.n_creeping_upper, :])\n self.delta_tau_bounded = delta_tau_bounded\n \"\"\" Bounded coseismic stress change [Pa] \"\"\"\n # get the additional slip\n self.slip_taper = (self.K_int_inv_upper @\n (self.delta_tau_bounded - self.delta_tau_unbounded\n )[:self.fault.n_creeping_upper, :])\n # check if the 
lower plate should have been bounded as well\n if self.fault.lower_rheo is not None:\n assert not np.any(np.abs(self.delta_tau_bounded[self.fault.n_creeping_upper:, :])\n > self.delta_tau_max_joint_lower), \\\n (\"Maximum stress change delta_tau_bounded \"\n f\"{np.max(np.abs(self.delta_tau_bounded)):.2e} Pa in lower interface \"\n f\"above delta_tau_max = {self.delta_tau_max_joint_lower:.2e} Pa\")\n self.slip_taper_ts = \\\n pd.DataFrame(index=self.eq_df.index, data=self.slip_taper.T) \\\n .cumsum(axis=0).reindex(index=self.t_obs, method=\"ffill\", fill_value=0)\n \"\"\" Timeseries of tapered slip [m] on the upper creeping fault patches \"\"\"\n\n # need the imagined location and orientation of the deep creep patches\n self.largehalflen = float(largehalflen)\n \"\"\" Fault patch half-length of the deep crreep patches [m] \"\"\"\n self.mid_deep_x1 = \\\n np.array([self.fault.mid_x1[self.fault.n_upper - 1]\n + np.cos(self.fault.theta_vec[self.fault.n_upper - 1])\n * self.fault.halflen_vec[self.fault.n_upper - 1]\n + np.cos(self.fault.theta_vec[self.fault.n_upper - 1])\n * self.largehalflen,\n self.fault.mid_x1[self.fault.n_upper + self.fault.n_lower_left - 1]\n - self.fault.halflen_vec[self.fault.n_upper + self.fault.n_lower_left - 1]\n - self.largehalflen,\n self.fault.mid_x1[-1]\n + np.cos(self.fault.theta_vec[-1] - np.pi)\n * self.fault.halflen_vec[-1]\n + np.cos(self.fault.theta_vec[-1] - np.pi)\n * self.largehalflen])\n \"\"\" :math:`x_1` coordinates of deep creep fault patch midpoints [m] \"\"\"\n self.mid_deep_x2 = \\\n np.array([self.fault.mid_x2[self.fault.n_upper - 1]\n - np.sin(self.fault.theta_vec[self.fault.n_upper - 1])\n * self.fault.halflen_vec[self.fault.n_upper - 1]\n - np.sin(self.fault.theta_vec[self.fault.n_upper - 1])\n * self.largehalflen,\n self.fault.mid_x2[self.fault.n_upper + self.fault.n_lower_left - 1],\n self.fault.mid_x2[-1]\n - np.sin(self.fault.theta_vec[-1] - np.pi)\n * self.fault.halflen_vec[-1]\n - np.sin(self.fault.theta_vec[-1] - np.pi)\n * self.largehalflen])\n \"\"\" :math:`x_2` coordinates of deep creep fault patch midpoints [m] \"\"\"\n self.theta_vec_deep = \\\n np.array([self.fault.theta_vec[self.fault.n_upper - 1],\n np.pi,\n self.fault.theta_vec[-1]])\n \"\"\" Plate dip angle [rad] for deep creep fault patches \"\"\"\n\n # create the Green's matrices\n self.pts_surf = pts_surf\n \"\"\" :math:`x_1` coordinates of surface observation points [m] \"\"\"\n self.n_stations = self.pts_surf.size\n \"\"\" Number of surface observing stations \"\"\"\n self.G_surf_fault = Glinedisp(\n self.pts_surf, 0, self.fault.mid_x1, self.fault.mid_x2,\n self.fault.halflen_vec, self.fault.theta_vec, self.fault.nu\n )[:, :self.fault.mid_x1.size]\n \"\"\" Green's matrix [-] relating slip on the main fault patches to surface motion \"\"\"\n self.G_surf_deep = Glinedisp(\n self.pts_surf, 0, self.mid_deep_x1, self.mid_deep_x2,\n self.largehalflen, self.theta_vec_deep, self.fault.nu)[:, :3]\n \"\"\" Green's matrix [-] relating slip on the deep creep patches to surface motion \"\"\"\n self.G_surf = np.hstack([self.G_surf_fault, self.G_surf_deep])\n \"\"\" Joint Green's matrix [-] relating slip on the entire ESPM to surface motion \"\"\"\n\n # calculate the best initial velocity state from the steady state ODE\n v_plate_vec = np.ones(self.fault.n_creeping) * self.v_plate\n v_plate_vec[self.fault.n_creeping_upper:] *= -1\n self.v_plate_vec = v_plate_vec\n \"\"\" Vector with the plate velocity for each creeping patch [m/s] \"\"\"\n # get the initial velocity, taking advantage of 
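# --- illustrative aside -------------------------------------------------------
# Sketch of the stress-capping / tapered-slip step above: where the unbounded
# coseismic stress change exceeds the cap, the clipped stress difference is
# converted back into compensating slip on the upper creeping patches through
# the pseudoinverse of the internal stress kernel.  K_int below is a random
# stand-in for the real kernel.
import numpy as np

rng = np.random.default_rng(3)
n_creep = 5
K_int = -1e6 * (np.eye(n_creep) + 0.1 * rng.normal(size=(n_creep, n_creep)))  # [Pa/m]
delta_tau_unbounded = np.array([5e5, 3e5, 1e5, 5e4, 1e4])                     # [Pa]
delta_tau_max = 2e5
delta_tau_bounded = np.fmin(delta_tau_max, delta_tau_unbounded)
slip_taper = np.linalg.pinv(K_int) @ (delta_tau_bounded - delta_tau_unbounded)  # [m]
# ------------------------------------------------------------------------------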
the option that there could be a\n # deep transition zone\n v_init = v_plate_vec.copy()\n if self.fault.upper_rheo.deep_transition is not None:\n ix_deep = np.argmin(np.abs(-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]\n - self.fault.upper_rheo.deep_transition\n - self.fault.upper_rheo.deep_transition_width))\n if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n v_init[:ix_deep] = np.linspace(self.v_plate * 1e-6, self.v_plate,\n num=ix_deep, endpoint=False)\n elif isinstance(self.fault.upper_rheo, NonlinearViscous):\n v_init[:ix_deep] = np.linspace(0, self.v_plate, num=ix_deep, endpoint=False)\n self.v_init = v_init\n \"\"\" Initial velocity in all creeping patches [m/s] \"\"\"\n\n @property\n def locked_slip(self):\n \"\"\" Timeseries of slip [m] on the locked patches for observation timespan \"\"\"\n return self.eq_df.cumsum(axis=0) \\\n .reindex(index=self.t_obs, method=\"ffill\", fill_value=0).values.T\n\n @property\n def deep_creep_slip(self):\n \"\"\" Timeseries of slip [m] on the deep creep patches for observation timestamps \"\"\"\n return (np.tile(self.t_obs.reshape(1, -1), (3, 1))\n * np.array([1, -1, -1]).reshape(3, 1)\n * self.v_plate_eff * 86400 * 365.25)\n\n @staticmethod\n def read_config_file(config_file):\n \"\"\"\n Read a configuration file and return it as a parsed dictionary.\n\n Parameters\n ----------\n config_file : str\n Path to INI configuration file.\n\n Returns\n -------\n cfg_dict : dict\n Parsed configuration file.\n \"\"\"\n\n # load configuration file\n cfg = configparser.ConfigParser()\n cfg.optionxform = str\n with open(config_file, mode=\"rt\") as f:\n cfg.read_file(f)\n cfg_seq, cfg_fault, cfg_mesh = cfg[\"sequence\"], cfg[\"fault\"], cfg[\"mesh\"]\n\n # parse rheologies\n upper_rheo_dict = dict(cfg[\"upper_rheo\"])\n upper_rheo_type = upper_rheo_dict.pop(\"type\")\n upper_rheo_kw_args = {k: float(v) for k, v in upper_rheo_dict.items()}\n try:\n lower_rheo_dict = dict(cfg[\"lower_rheo\"])\n except KeyError:\n lower_rheo_type = None\n lower_rheo_kw_args = None\n else:\n lower_rheo_type = lower_rheo_dict.pop(\"type\")\n lower_rheo_kw_args = {k: float(v) for k, v in lower_rheo_dict.items()}\n\n # parse everything else\n cfg_dict = {\n \"theta\": np.deg2rad(cfg_fault.getfloat(\"theta_deg\")),\n \"D_lock\": cfg_fault.getfloat(\"D_lock\"),\n \"H\": cfg_fault.getfloat(\"H\"),\n \"nu\": cfg_fault.getfloat(\"nu\"),\n \"E\": cfg_fault.getfloat(\"E\"),\n \"v_s\": cfg_fault.getfloat(\"v_s\"),\n \"halflen\": cfg_mesh.getfloat(\"halflen\"),\n \"n_upper\": cfg_mesh.getint(\"n_up\"),\n \"n_lower_left\": cfg_mesh.getint(\"n_low_l\"),\n \"n_lower_right\": cfg_mesh.getint(\"n_low_r\"),\n \"halflen_factor_lower\": cfg_mesh.getfloat(\"halflen_factor_lower\"),\n \"D_max\": cfg_mesh.getfloat(\"D_max\", fallback=None),\n \"x1_pretrench\": cfg_mesh.getfloat(\"x1_pretrench\", fallback=None),\n \"v_plate\": cfg_seq.getfloat(\"v_plate\"),\n \"n_cycles_max\": cfg_seq.getint(\"n_cycles_max\"),\n \"n_samples_per_eq\": cfg_seq.getint(\"n_samples_per_eq\"),\n \"delta_tau_max\": cfg_fault.getfloat(\"delta_tau_max\", fallback=np.inf),\n \"v_max\": cfg_fault.getfloat(\"v_max\", fallback=np.inf),\n \"Ds_0\": np.atleast_1d(json.loads(cfg_seq[\"Ds_0\"])),\n \"Ds_0_logsigma\": np.atleast_1d(json.loads(cfg_seq[\"Ds_0_logsigma\"])),\n \"T_rec\": np.atleast_1d(json.loads(cfg_seq[\"T_rec\"])),\n \"T_rec_logsigma\": np.atleast_1d(json.loads(cfg_seq[\"T_rec_logsigma\"])),\n \"D_asp_min\": np.atleast_1d(json.loads(cfg_seq[\"D_asp_min\"])),\n \"D_asp_max\": 
np.atleast_1d(json.loads(cfg_seq[\"D_asp_max\"])),\n \"T_anchor\": cfg_seq.get(\"T_anchor\"),\n \"T_last\": json.loads(cfg_seq[\"T_last\"]),\n \"enforce_v_plate\": cfg_seq.getboolean(\"enforce_v_plate\"),\n \"largehalflen\": cfg_mesh.getfloat(\"largehalflen\"),\n \"upper_rheo_type\": upper_rheo_type,\n \"lower_rheo_type\": lower_rheo_type,\n \"upper_rheo_kw_args\": upper_rheo_kw_args,\n \"lower_rheo_kw_args\": lower_rheo_kw_args\n }\n return cfg_dict\n\n @classmethod\n def from_config_dict(cls, cfg, t_obs, pts_surf):\n \"\"\"\n Create a SubductionSimulation object from a configuration dictionary.\n\n Parameters\n ----------\n cfg : dict\n Dictionary containing all parsed elements from the configuration file\n t_obs : numpy.ndarray, pandas.DatetimeIndex\n Observation timesteps, either as decimal years relative to the cycle start,\n or as Timestamps\n pts_surf : numpy.ndarray\n Horizontal landward observation coordinates [m] relative to the trench\n\n See Also\n --------\n read_config_file : To load a configuration file into a dictionary.\n \"\"\"\n\n # create rheology objects\n upper_rheo = globals()[cfg[\"upper_rheo_type\"]](**cfg[\"upper_rheo_kw_args\"])\n if cfg[\"lower_rheo_type\"] is None:\n lower_rheo = None\n else:\n lower_rheo = globals()[cfg[\"lower_rheo_type\"]](**cfg[\"lower_rheo_kw_args\"])\n\n # create fault object\n fault = Fault2D(theta=cfg[\"theta\"],\n D_lock=cfg[\"D_lock\"],\n H=cfg[\"H\"],\n nu=cfg[\"nu\"],\n E=cfg[\"E\"],\n v_s=cfg[\"v_s\"],\n halflen=cfg[\"halflen\"],\n upper_rheo=upper_rheo,\n n_upper=cfg[\"n_upper\"],\n lower_rheo=lower_rheo,\n n_lower_left=cfg[\"n_lower_left\"],\n n_lower_right=cfg[\"n_lower_right\"],\n halflen_factor_lower=cfg[\"halflen_factor_lower\"],\n D_max=cfg[\"D_max\"],\n x1_pretrench=cfg[\"x1_pretrench\"])\n\n # create simulation object\n return cls(v_plate=cfg[\"v_plate\"],\n n_cycles_max=cfg[\"n_cycles_max\"],\n n_samples_per_eq=cfg[\"n_samples_per_eq\"],\n delta_tau_max=cfg[\"delta_tau_max\"],\n v_max=cfg[\"v_max\"],\n fault=fault,\n Ds_0=cfg[\"Ds_0\"],\n Ds_0_logsigma=cfg[\"Ds_0_logsigma\"],\n T_rec=cfg[\"T_rec\"],\n T_rec_logsigma=cfg[\"T_rec_logsigma\"],\n D_asp_min=cfg[\"D_asp_min\"],\n D_asp_max=cfg[\"D_asp_max\"],\n T_anchor=cfg[\"T_anchor\"],\n T_last=cfg[\"T_last\"],\n enforce_v_plate=cfg[\"enforce_v_plate\"],\n largehalflen=cfg[\"largehalflen\"],\n t_obs=t_obs,\n pts_surf=pts_surf)\n\n @staticmethod\n def get_n(alpha_n, alpha_eff, v_eff):\n r\"\"\"\n Calculate the real linear viscous strength constant from the effective one.\n\n Parameters\n ----------\n alpha_n : float\n Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m]\n alpha_eff : float\n Effective linear viscous strength constant [Pa * s/m]\n v_eff : float\n Effective velocity [m/s] used for ``alpha_eff`` conversions\n\n Returns\n -------\n n : float\n Power-law exponent :math:`n` [-]\n \"\"\"\n return (np.log(alpha_n) + np.log(v_eff)) / (np.log(alpha_eff) + np.log(v_eff))\n\n @staticmethod\n def get_alpha_n(alpha_eff, n, v_eff):\n r\"\"\"\n Calculate the real linear viscous strength constant from the effective one.\n\n Parameters\n ----------\n alpha_eff : float\n Effective linear viscous strength constant [Pa * s/m]\n n : float\n Power-law exponent :math:`n` [-]\n v_eff : float\n Effective velocity [m/s] used for ``alpha_eff`` conversions\n\n Returns\n -------\n alpha_n : float\n Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m]\n \"\"\"\n alpha_n = alpha_eff**n * v_eff**(n-1)\n return alpha_n\n\n @staticmethod\n def 
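# --- illustrative aside -------------------------------------------------------
# Minimal sketch of the INI layout that read_config_file expects, using the
# section and key names visible in the parsing code above.  The numeric values
# are placeholders (not a validated parameter set), the [upper_rheo] keys other
# than "type" are assumptions about that rheology's keyword arguments, and
# [lower_rheo] is omitted, which the parser maps to lower_rheo = None.
import configparser

demo_ini = """
[fault]
theta_deg = 15.0
D_lock = 40e3
H = 50e3
nu = 0.25
E = 100e9
v_s = 3000.0

[mesh]
halflen = 5e3
n_up = 60
n_low_l = 20
n_low_r = 40
halflen_factor_lower = 2.0
largehalflen = 1e6

[sequence]
v_plate = 1e-9
n_cycles_max = 20
n_samples_per_eq = 100
Ds_0 = [6.0]
Ds_0_logsigma = [0.1]
T_rec = [200.0]
T_rec_logsigma = [0.1]
D_asp_min = [0.0]
D_asp_max = [30e3]
T_anchor = 2020-01-01
T_last = ["1960-01-01"]
enforce_v_plate = true

[upper_rheo]
type = NonlinearViscous
alpha_n = 1e21
n = 3.0
"""
cfg = configparser.ConfigParser()
cfg.optionxform = str
cfg.read_string(demo_ini)
print(cfg["fault"].getfloat("theta_deg"), cfg["sequence"].getboolean("enforce_v_plate"))
# ------------------------------------------------------------------------------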
get_alpha_eff(alpha_n, n, v_eff):\n r\"\"\"\n Calculate the effective linear viscous strength constant from the real one.\n\n Parameters\n ----------\n alpha_n : float\n Nonlinear viscous rheology strength constant :math:`\\alpha_n` [Pa^n * s/m]\n n : float\n Power-law exponent :math:`n` [-]\n v_eff : float\n Effective velocity [m/s] used for ``alpha_eff`` conversions\n\n Returns\n -------\n alpha_eff : float\n Effective linear viscous strength constant [Pa * s/m]\n \"\"\"\n if isinstance(v_eff, np.ndarray):\n temp = v_eff.copy()\n temp[temp == 0] = np.NaN\n else:\n temp = v_eff\n alpha_eff = alpha_n**(1/n) * temp**((1-n)/n)\n return alpha_eff\n\n @staticmethod\n def get_alpha_eff_from_alpha_h(alpha_h, v_eff):\n r\"\"\"\n Calculate the effective viscosity from the rate-dependent friction.\n\n Parameters\n ----------\n alpha_h : float\n Rate-and-state parameter :math:`(a - b) * \\sigma_E`,\n where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties,\n and :math:`\\sigma_E` [Pa] is effective fault normal stress.\n v_eff : float\n Effective velocity [m/s] used for ``alpha_eff`` conversions\n\n Returns\n -------\n alpha_eff : float\n Effective linear viscous strength constant [Pa * s/m]\n \"\"\"\n if isinstance(v_eff, np.ndarray):\n temp = v_eff.copy()\n temp[temp == 0] = np.NaN\n else:\n temp = v_eff\n alpha_eff = alpha_h / temp\n return alpha_eff\n\n def run(self, simple_rk4=False):\n \"\"\"\n Run a full simulation.\n \"\"\"\n # run forward integration\n if self.fault.lower_rheo is None:\n if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n full_state = flat_run_rdlog(\n self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,\n self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,\n self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,\n self.delta_tau_bounded, self.fault.upper_rheo.v_0, self.alpha_h_vec,\n self.fault.mu_over_2vs)\n elif isinstance(self.fault.upper_rheo, NonlinearViscous):\n full_state = flat_run_plvis(\n self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,\n self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,\n self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,\n self.delta_tau_bounded, self.alpha_n_vec, self.n_vec, self.A_vec,\n self.fault.mu_over_2vs)\n else:\n raise NotImplementedError\n elif isinstance(self.fault.lower_rheo, NonlinearViscous):\n if isinstance(self.fault.upper_rheo, NonlinearViscous):\n full_state = flat_run_plvis_plvis(\n self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,\n self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,\n self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,\n self.delta_tau_bounded, self.fault.upper_rheo.alpha_n,\n self.fault.upper_rheo.n, self.fault.lower_rheo.alpha_n,\n self.fault.lower_rheo.n, simple_rk4)\n elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n full_state = flat_run_rdlog_plvis(\n self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint,\n self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int,\n self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper,\n self.delta_tau_bounded, self.fault.upper_rheo.v_0,\n self.fault.upper_rheo.alpha_h, self.fault.lower_rheo.alpha_n,\n self.fault.lower_rheo.n, simple_rk4)\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n # extract the observations that were actually requested\n obs_state 
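# --- illustrative aside -------------------------------------------------------
# Consistency check for the strength-constant conversions defined here: with
# tau = (alpha_n * v)**(1/n), the effective linear constant at velocity v_eff
# is alpha_eff = alpha_n**(1/n) * v_eff**((1-n)/n), so get_alpha_n,
# get_alpha_eff, and get_n must round-trip (values below are arbitrary).
import numpy as np

alpha_eff, n, v_eff = 1e13, 3.0, 1e-9             # [Pa * s/m], [-], [m/s]
alpha_n = alpha_eff**n * v_eff**(n - 1)                         # get_alpha_n
alpha_eff_back = alpha_n**(1 / n) * v_eff**((1 - n) / n)        # get_alpha_eff
n_back = (np.log(alpha_n) + np.log(v_eff)) / (np.log(alpha_eff) + np.log(v_eff))  # get_n
assert np.isclose(alpha_eff_back, alpha_eff) and np.isclose(n_back, n)
# rate-dependent analogue: alpha_eff = alpha_h / v_eff (get_alpha_eff_from_alpha_h)
# ------------------------------------------------------------------------------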
= full_state[:, self.ix_obs_joint].copy()\n # since we're only calculating transient surface displacements, need to\n # remove the tapered slip due to bounded stresses\n obs_state[:self.fault.n_creeping_upper, :] -= self.slip_taper_ts.values.T\n # convert to surface displacements\n surf_disps = get_surface_displacements_plvis_plvis(\n obs_state, self.fault.n_creeping_upper, self.fault.n_creeping_lower,\n np.ascontiguousarray(self.G_surf[:, self.fault.n_locked:]),\n self.deep_creep_slip)\n return full_state, obs_state, surf_disps\n\n def zero_obs_at_eq(self, surf_disps):\n \"\"\"\n Reset to zero the surface displacement timeseries every time an earthquake happens.\n \"\"\"\n obs_zeroed = surf_disps.copy()\n slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index,\n self.t_obs.max() > self.eq_df.index)\n n_slips_obs = slips_obs.sum()\n if n_slips_obs == 0:\n obs_zeroed -= obs_zeroed[:, 0].reshape(-1, 1)\n else:\n i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq\n in self.eq_df.index.values[slips_obs]]\n obs_zeroed[:, :i_slips_obs[0]] -= obs_zeroed[:, i_slips_obs[0] - 1].reshape(-1, 1)\n obs_zeroed[:, i_slips_obs[0]:] -= obs_zeroed[:, i_slips_obs[0]].reshape(-1, 1)\n for i in range(1, n_slips_obs):\n obs_zeroed[:, i_slips_obs[i]:] -= obs_zeroed[:, i_slips_obs[i]].reshape(-1, 1)\n return obs_zeroed\n\n def _reduce_full_state(self, data):\n # get all NaN columns\n cols_all_nan = np.all(np.isnan(data), axis=0)\n # check if there was early stopping\n if cols_all_nan.sum() > 0:\n # get the border indices where integrations have been skipped\n ix_last, ix_first = np.flatnonzero(cols_all_nan)[[0, -1]]\n ix_last -= 1\n ix_first += 1\n # get indices before and after the NaN period\n ix_valid = np.r_[0:ix_last, ix_first:self.t_eval_joint.size]\n # subset data\n data = data[:, ix_valid]\n t_sub = self.t_eval_joint[ix_valid].copy()\n t_sub[ix_last:] -= self.t_eval_joint[ix_first] - self.t_eval_joint[ix_last]\n n_cyc_completed = int(np.round(self.t_eval_joint[ix_last] / self.T_eff)) + 1\n else:\n t_sub = self.t_eval_joint.copy()\n n_cyc_completed = self.n_cycles_max + 1\n # done\n return data, t_sub, n_cyc_completed\n\n def plot_surface_displacements(self, obs_zeroed, obs_noisy=None):\n \"\"\"\n Plot the observers' surface displacement timeseries.\n\n Parameters\n ----------\n obs_zeroed : numpy.ndarray\n Surface displacements as output by :meth:`~zero_obs_at_eq`.\n obs_noisy : numpy.ndarray, optional\n Noisy surface observations.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n # some helper variables\n isort = np.argsort(self.pts_surf)\n i_off = 3 * np.std(obs_zeroed.ravel())\n # get float dates of observed earthquakes\n slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index,\n self.t_obs.max() > self.eq_df.index)\n n_slips_obs = slips_obs.sum()\n if n_slips_obs > 0:\n i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq\n in self.eq_df.index.values[slips_obs]]\n t_last_slips = [self.t_obs[islip] for islip in i_slips_obs]\n else:\n t_last_slips = []\n # start plot\n fig, ax = plt.subplots(nrows=2, sharex=True, layout=\"constrained\")\n for tslip in t_last_slips:\n ax[0].axvline(tslip, c=\"0.7\", zorder=-1)\n ax[1].axvline(tslip, c=\"0.7\", zorder=-1)\n for i, ix in enumerate(isort):\n if obs_noisy is not None:\n ax[0].plot(self.t_obs, obs_noisy[ix, :] + i*i_off,\n \".\", c=\"k\", rasterized=True)\n ax[1].plot(self.t_obs, obs_noisy[ix + self.n_stations, :] + i*i_off,\n \".\", c=\"k\", rasterized=True)\n 
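# --- illustrative aside -------------------------------------------------------
# Sketch of the re-referencing done in zero_obs_at_eq: each surface time series
# is reset to zero at the first sample at or after every observed earthquake,
# so the plotted signal shows only the motion accumulated since the last event
# (toy single-station example).
import numpy as np

t_obs = np.linspace(0.0, 10.0, 11)
disp = 0.3 * t_obs + np.where(t_obs >= 5.0, 2.0, 0.0)    # ramp plus a coseismic step at t = 5
zeroed = disp.copy()
i_eq = np.argmax(t_obs >= 5.0)                           # first sample at/after the event
zeroed[:i_eq] -= zeroed[i_eq - 1]                        # pre-event segment ends at zero
zeroed[i_eq:] -= zeroed[i_eq]                            # post-event segment starts at zero
# ------------------------------------------------------------------------------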
ax[0].plot(self.t_obs, obs_zeroed[ix, :] + i*i_off, c=f\"C{i}\")\n ax[1].plot(self.t_obs, obs_zeroed[ix + self.n_stations, :] + i*i_off, c=f\"C{i}\")\n ax[1].set_xlabel(\"Time\")\n ax[0].set_ylabel(\"Horizontal [m]\")\n ax[1].set_ylabel(\"Vertical [m]\")\n fig.suptitle(\"Surface Displacement\")\n return fig, ax\n\n def plot_fault_velocities(self, full_state):\n \"\"\"\n Plot the velocities on all creeping fault patches.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n State matrix as output from :meth:`~run`.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import SymLogNorm\n from cmcrameri import cm\n # extract velocities\n vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper,\n self.fault.n_state_upper + self.fault.n_creeping_lower:\n self.fault.n_state_upper + self.fault.n_state_lower],\n :] / self.v_plate\n # check whether the simulation spun up, and NaN data needs to be skipped\n vels, t_sub, n_cyc_completed = self._reduce_full_state(vels)\n # normalize time\n t_sub /= self.T_eff\n # prepare plot\n norm = SymLogNorm(linthresh=1, vmin=-1, vmax=100)\n if self.fault.lower_rheo is None:\n fig, ax = plt.subplots(figsize=(10, 5), layout=\"constrained\")\n ax = [ax]\n else:\n fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(10, 5), layout=\"constrained\")\n # plot velocities\n c = ax[0].pcolormesh(t_sub,\n self.fault.end_upper[0, self.fault.n_locked:] / 1e3,\n vels[:self.fault.n_creeping_upper, :-1],\n norm=norm, cmap=cm.vik, shading=\"flat\")\n ax[0].set_yticks(self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3)\n # add vertical lines for cycle breaks\n for n in range(1, n_cyc_completed):\n ax[0].axvline(n, c=\"k\", lw=1)\n # make the y-axis increasing downwards to mimic depth even though we're plotting x1\n ax[0].invert_yaxis()\n # repeat for lower interface, if simulated\n if self.fault.lower_rheo is not None:\n c = ax[1].pcolormesh(t_sub,\n self.fault.end_lower[0, :] / 1e3,\n -vels[self.fault.n_creeping_upper:, :-1],\n norm=norm, cmap=cm.vik, shading=\"flat\")\n ax[1].set_yticks(self.fault.end_lower[0, [0, -1]] / 1e3)\n # add horizontal lines to show where the lower interface is below the locked zone\n ax[1].axhline(0, c=\"k\", lw=1)\n ax[1].axhline(self.fault.x1_lock / 1e3, c=\"k\", lw=1)\n for n in range(1, n_cyc_completed):\n ax[1].axvline(n, c=\"k\", lw=1)\n ax[1].invert_yaxis()\n # finish figure\n if self.fault.lower_rheo is None:\n ax[0].set_ylabel(\"Upper Interface\\n$x_1$ [km]\")\n ax[0].set_xlabel(\"Normalized Time $t/T$\")\n else:\n ax[0].set_ylabel(\"Upper Interface\\n$x_1$ [km]\")\n ax[1].set_ylabel(\"Lower Interface\\n$x_1$ [km]\")\n ax[1].set_xlabel(\"Normalized Time $t/T$\")\n fig.colorbar(c, ax=ax, location=\"right\", orientation=\"vertical\", fraction=0.05,\n label=\"$v/v_{plate}$\")\n fig.suptitle(\"Normalized Fault Patch Velocities\")\n return fig, ax\n\n def plot_fault_slip(self, full_state, deficit=True, include_locked=True, include_deep=True):\n \"\"\"\n Plot the cumulative slip (deficit) for the fault patches.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n State matrix as output from :meth:`~run`.\n deficit : bool, optional\n If ``True`` (default), remove the plate velocity to plot slip deficit,\n otherwise keep it included.\n include_locked : bool, optional\n If ``True`` (default), also plot the slip on the locked patches.\n include_deep : bool, optional\n If ``True`` (default), also plot the slip on the semi-infinite 
patches\n at the end of the interfaces.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import Normalize, SymLogNorm\n from cmcrameri import cm\n # extract slip\n slip = full_state[np.r_[:self.fault.n_creeping_upper,\n self.fault.n_state_upper:\n self.fault.n_state_upper + self.fault.n_creeping_lower], :]\n # check whether the simulation spun up, and NaN data needs to be skipped\n slip, t_sub, n_cyc_completed = self._reduce_full_state(slip)\n # normalize to slip per full cycle\n cum_slip_per_cycle = self.v_plate_eff * self.T_eff * 86400 * 365.25\n slip /= cum_slip_per_cycle\n # add optional slip histories, if desired\n if include_locked:\n eq_df_joint = pd.DataFrame(\n index=(self.eq_df.index.values.reshape(1, -1)\n + self.T_eff * np.arange(n_cyc_completed).reshape(-1, 1)\n ).ravel(),\n data=np.tile(self.eq_df.values, (n_cyc_completed, 1)))\n locked_slip = eq_df_joint.cumsum(axis=0) \\\n .reindex(index=t_sub, method=\"ffill\", fill_value=0).values.T\n locked_slip /= cum_slip_per_cycle\n if include_deep:\n deep_creep_slip = (np.tile(t_sub.reshape(1, -1), (3, 1))\n * np.array([1, -1, -1]).reshape(3, 1)\n * self.v_plate_eff * 86400 * 365.25)\n deep_creep_slip /= cum_slip_per_cycle\n # remove plate velocity to get slip deficit, if desired\n if deficit:\n cmap = cm.vik\n norm = SymLogNorm(linthresh=1e-2, vmin=-1, vmax=1)\n slip[:self.fault.n_creeping_upper] -= t_sub.reshape(1, -1) / self.T_eff\n slip[self.fault.n_creeping_upper:] += t_sub.reshape(1, -1) / self.T_eff\n slip -= slip[:, -2].reshape(-1, 1)\n if include_locked:\n locked_slip -= t_sub.reshape(1, -1) / self.T_eff\n if include_deep:\n deep_creep_slip -= (t_sub.reshape(1, -1)\n * np.array([1, -1, -1]).reshape(3, 1)) / self.T_eff\n else:\n norm = Normalize(vmin=0, vmax=n_cyc_completed)\n cmap = cm.batlow\n # normalize time\n t_sub /= self.T_eff\n # prepare figure\n nrows = (1 + int(self.fault.lower_rheo is not None)\n + int(include_locked) + int(include_deep) * 3)\n hr_locked = ((self.fault.end_upper[0, self.fault.n_locked] - self.fault.end_upper[0, 0])\n / (self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0]))\n hr_lower = ((self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0])\n / (self.fault.end_upper[0, -1] - self.fault.end_upper[0, self.fault.n_locked]))\n hr = ([hr_locked] * int(include_locked) + [1]\n + [hr_locked, hr_locked] * int(include_deep)\n + [hr_lower] * int(self.fault.lower_rheo is not None)\n + [hr_locked] * int(include_deep))\n fig, ax = plt.subplots(nrows=nrows, sharex=True, gridspec_kw={\"height_ratios\": hr},\n figsize=(10, 5), layout=\"constrained\")\n iax = 0\n # plot locked\n if include_locked:\n c = ax[iax].pcolormesh(t_sub,\n self.fault.end_upper[0, :self.fault.n_locked + 1] / 1e3,\n locked_slip[:, :-1],\n norm=norm, cmap=cmap, shading=\"flat\")\n ax[iax].set_ylabel(\"Locked\\n$x_1$ [km]\")\n temp_x1 = self.fault.end_upper[0, [0, self.fault.n_locked]] / 1e3\n ax[iax].set_yticks(temp_x1, [f\"{x:.0f}\" for x in temp_x1])\n iax += 1\n # plot upper creeping\n c = ax[iax].pcolormesh(t_sub,\n self.fault.end_upper[0, self.fault.n_locked:] / 1e3,\n slip[:self.fault.n_creeping_upper, :-1],\n norm=norm, cmap=cmap, shading=\"flat\")\n ax[iax].set_ylabel(\"Creeping\\n$x_1$ [km]\")\n temp_x1 = self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3\n ax[iax].set_yticks(temp_x1, [f\"{x:.0f}\" for x in temp_x1])\n iax += 1\n # plot end patch on upper interface\n if include_deep:\n temp_x1 = np.array([self.fault.end_upper[0, 
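
Editor's aside (synthetic numbers, units simplified to m/yr instead of the m/s-plus-seconds conversion used above): the slip-deficit normalization in plot_fault_slip — cumulative slip divided by one full cycle of plate motion, with the steady plate contribution t/T subtracted.

import numpy as np

v_plate = 0.05                       # plate rate [m/yr], made-up value
T = 200.0                            # recurrence time [yr], made-up value
t = np.linspace(0.0, T, 5)           # times within one cycle [yr]
cum_slip = 0.8 * v_plate * t         # a patch creeping at 80% of the plate rate [m]

s_full = v_plate * T                 # slip accumulated over one full cycle [m]
deficit = cum_slip / s_full - t / T  # normalized slip deficit, 0 at t = 0
print(deficit)                       # -> [ 0.   -0.05 -0.1  -0.15 -0.2 ]
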
-1],\n self.mid_deep_x1[0]]) / 1e3\n c = ax[iax].pcolormesh(t_sub,\n temp_x1,\n deep_creep_slip[0, :-1].reshape(1, -1),\n norm=norm, cmap=cmap, shading=\"flat\")\n ax[iax].set_ylabel(\"Deep Creep\\n$x_1$ [km]\")\n ax[iax].set_yticks(temp_x1, [f\"{temp_x1[0]:.0f}\", \"$-\\\\infty$\"])\n iax += 1\n # plot left end patch on lower interface\n if include_deep:\n temp_x1 = np.array([self.mid_deep_x1[1],\n self.fault.end_lower[0, 0]]) / 1e3\n c = ax[iax].pcolormesh(t_sub,\n temp_x1,\n -deep_creep_slip[1, :-1].reshape(1, -1),\n norm=norm, cmap=cmap, shading=\"flat\")\n ax[iax].set_ylabel(\"Deep Creep\\n$x_1$ [km]\")\n ax[iax].set_yticks(temp_x1, [\"$-\\\\infty$\", f\"{temp_x1[1]:.0f}\"])\n iax += 1\n # plot lower creeping\n if self.fault.lower_rheo is not None:\n c = ax[iax].pcolormesh(t_sub,\n self.fault.end_lower[0, :] / 1e3,\n -slip[self.fault.n_creeping_upper:, :-1],\n norm=norm, cmap=cmap, shading=\"flat\")\n ax[iax].axhline(0, c=\"k\", lw=1)\n ax[iax].axhline(self.fault.x1_lock / 1e3, c=\"k\", lw=1)\n ax[iax].set_ylabel(\"Creeping\\n$x_1$ [km]\")\n temp_x1 = self.fault.end_lower[0, [0, -1]] / 1e3\n ax[iax].set_yticks(temp_x1, [f\"{x:.0f}\" for x in temp_x1])\n iax += 1\n # plot right end patch on lower interface\n if include_deep:\n temp_x1 = np.array([self.fault.end_lower[0, -1],\n self.mid_deep_x1[2]]) / 1e3\n c = ax[iax].pcolormesh(t_sub,\n temp_x1,\n -deep_creep_slip[2, :-1].reshape(1, -1),\n norm=norm, cmap=cmap, shading=\"flat\")\n ax[iax].set_ylabel(\"Deep Creep\\n$x_1$ [km]\")\n ax[iax].set_yticks(temp_x1, [f\"{temp_x1[0]:.0f}\", \"$-\\\\infty$\"])\n iax += 1\n # finish figure\n for iax in range(len(ax)):\n for n in range(1, n_cyc_completed):\n ax[iax].axvline(n, c=\"k\", lw=1)\n ax[iax].invert_yaxis()\n ax[-1].set_xlabel(\"Normalized Time $t/T$\")\n fig.colorbar(c, ax=ax, location=\"right\", orientation=\"vertical\", fraction=0.05,\n label=\"$(s - t*v_{plate})/s_{full}$\" if deficit else \"$s/s_{full}$\")\n suptitle = \"Normalized Fault Patch Slip\"\n if deficit:\n suptitle += \" Deficit\"\n fig.suptitle(suptitle)\n return fig, ax\n\n def plot_eq_velocities(self, full_state):\n \"\"\"\n Plot the before and after velocities on all creeping fault patches\n for each distinct earthquake.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n State matrix as output from :meth:`~run`.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n # get indices of each last earthquake in last cycle\n temp = self.eq_df.astype(bool).drop_duplicates(keep=\"last\")\n time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff\n tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last])\n if np.any(tdiff > 0):\n warn(\"Couldn't find exact indices, using time differences of \"\n f\"{tdiff * 365.25 * 86400} seconds.\")\n ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]\n n_eq_found = len(ix_eq_last)\n assert n_eq_found == (self.Ds_0 > 0).sum(), \\\n \"Couldn't find indices of each last non-zero earthquake in the \" \\\n \"last cycle, check for rounding errors.\"\n # calculate average slip for plotted earthquakes\n slip_last = self.eq_df.loc[temp.index, :]\n slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean()\n for ieq in range(n_eq_found)]\n # extract velocities\n vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper,\n self.fault.n_state_upper + self.fault.n_creeping_lower:\n self.fault.n_state_upper + 
self.fault.n_state_lower],\n :] / self.v_plate\n # prepare plot\n fig, ax = plt.subplots(nrows=n_eq_found, ncols=1 if self.fault.lower_rheo is None else 2,\n sharey=True, layout=\"constrained\")\n ax = np.asarray(ax).reshape(n_eq_found, -1)\n # loop over earthquakes\n for irow, ieq in enumerate(ix_eq_last):\n # repeat plot for before and after\n for ioff, label in enumerate([\"before\", \"after\"]):\n ax[irow, 0].set_yscale(\"symlog\", linthresh=1)\n ax[irow, 0].plot(self.fault.mid_x1_creeping[:self.fault.n_creeping_upper] / 1e3,\n vels[:self.fault.n_creeping_upper, ieq - 1 + ioff],\n c=f\"C{ioff}\", label=label)\n if self.fault.lower_rheo is not None:\n ax[irow, 1].set_yscale(\"symlog\", linthresh=1)\n ax[irow, 1].plot(\n self.fault.mid_x1_creeping[self.fault.n_creeping_upper:] / 1e3,\n -vels[self.fault.n_creeping_upper:, ieq - 1 + ioff],\n c=f\"C{ioff}\", label=label)\n # finish plot\n for irow in range(n_eq_found):\n ax[irow, 0].set_title(f\"Upper Interface: $s={slip_avg[irow]:.2g}$ m\")\n ax[irow, 0].legend()\n ax[irow, 0].set_xlabel(\"$x_1$ [km]\")\n ax[irow, 0].set_ylabel(\"$v/v_{plate}$\")\n if self.fault.lower_rheo is not None:\n ax[irow, 1].set_title(f\"Lower Interface: $s={slip_avg[irow]:.2g}$ m\")\n ax[irow, 1].axvline(0, c=\"k\", lw=1)\n ax[irow, 1].axvline(self.fault.x1_lock / 1e3, c=\"k\", lw=1)\n ax[irow, 1].tick_params(labelleft=True)\n ax[irow, 1].legend()\n ax[irow, 1].set_xlabel(\"$x_1$ [km]\")\n ax[irow, 1].set_ylabel(\"$v/v_{plate}$\")\n fig.suptitle(\"Normalized Earthquake Velocity Changes\")\n return fig, ax\n\n def plot_fault(self):\n \"\"\"\n Plot the fault.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(figsize=(10, 3), layout=\"constrained\")\n ax.plot(self.fault.end_upper[0, :self.fault.n_locked + 1]/1e3,\n self.fault.end_upper[1, :self.fault.n_locked + 1]/1e3,\n marker=\"|\", markeredgecolor=\"k\",\n label=\"Locked\")\n ax.plot(self.fault.end_upper[0, self.fault.n_locked:]/1e3,\n self.fault.end_upper[1, self.fault.n_locked:]/1e3,\n marker=\"|\", markeredgecolor=\"k\",\n label=\"Upper Creeping\")\n ax.plot(self.fault.end_lower[0, :]/1e3,\n self.fault.end_lower[1, :]/1e3,\n marker=\"|\", markeredgecolor=\"k\",\n label=\"Lower Creeping\")\n ax.plot(self.pts_surf / 1e3, np.zeros_like(self.pts_surf),\n \"^\", markeredgecolor=\"none\", markerfacecolor=\"k\",\n label=\"Observers\")\n ax.axhline(0, lw=1, c=\"0.5\", zorder=-1)\n ax.legend()\n ax.set_xlabel(\"$x_1$ [km]\")\n ax.set_ylabel(\"$x_2$ [km]\")\n ax.set_title(\"Fault Mesh and Observer Locations\")\n ax.set_aspect(\"equal\")\n return fig, ax\n\n def plot_slip_phases(self, full_state, post_inter_transition=0.01, normalize=True):\n \"\"\"\n Plot the cumulative slip on the fault for the three different\n phases (coseismic, early postseismic, and interseismic).\n\n Only works if there is a single earthquake in the sequence.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n State matrix as output from :meth:`~run`.\n post_inter_transition : float, optional\n Fraction of the recurrence time that should be considered\n early postseismic and not interseismic.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n from scipy.interpolate import interp1d\n # check that the sequence only has one earthquake\n if not self.n_eq == 1:\n raise NotImplementedError(\"Don't know how to plot slip phases if \"\n \"multiple earthquakes are present in the sequence.\")\n # 
get coseismic slip\n co = np.concatenate([self.eq_df.values.ravel(),\n self.slip_taper.ravel()])\n # get index of last earthquake in last cycle\n time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff\n ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0]\n - self.ix_break_joint[-2])\n # reorganize interseismic slip\n slip = full_state[:self.fault.n_creeping_upper, self.ix_break_joint[-2]:]\n slip_pre = slip[:, :ix_eq_last]\n slip_post = slip[:, ix_eq_last:]\n slip_pre += (slip_post[:, -1] - slip_pre[:, 0]).reshape(-1, 1)\n slip_joint = np.hstack([slip_post, slip_pre])\n slip_joint -= slip_joint[:, 0].reshape(-1, 1)\n # same for time\n t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy()\n t_last_pre = t_last[:ix_eq_last]\n t_last_post = t_last[ix_eq_last:]\n t_last_pre += t_last_post[-1] - t_last_pre[0]\n t_last_joint = np.concatenate([t_last_post, t_last_pre])\n t_last_joint -= t_last_joint[0]\n # since slip_joint is now already cumulative slip since the earthquake,\n # with the tapered slip removed, we can just read out the early\n # postseismic and rest interseismic cumulative slip distributions\n post = interp1d(t_last_joint, slip_joint)(post_inter_transition * self.T_eff)\n inter = slip_joint[:, -1] - post\n post = np.concatenate([np.zeros(self.fault.n_locked), post])\n inter = np.concatenate([np.zeros(self.fault.n_locked), inter])\n # optionally, normalize by total expected cumulative slip over the entire cycle\n if normalize:\n total_slip = self.T_eff * self.v_plate * 86400 * 365.25\n co /= total_slip\n post /= total_slip\n inter /= total_slip\n # make figure\n fig, ax = plt.subplots(layout=\"constrained\")\n ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, co, label=\"Coseismic\")\n ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, post, label=\"Postseismic\")\n ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, inter, label=\"Interseismic\")\n ax.legend()\n ax.set_xlabel(\"$x_1$ [km]\")\n ax.set_ylabel(\"Normalized cumulative slip [-]\" if normalize\n else \"Cumulative Slip [m]\")\n ax.set_title(\"Slip Phases (Post-/Interseismic cutoff at \"\n f\"{post_inter_transition:.1%} \" \"$T_{rec}$)\")\n return fig, ax\n\n def plot_viscosity(self, full_state, return_viscosities=False):\n \"\"\"\n Plot the viscosity structure with depth for the steady state, as well as\n for the immediate pre- and coseismic velocities.\n\n For multiple earthquakes, it will use the minimum preseismic and maximum\n postseismic velocities.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n State matrix as output from :meth:`~run`.\n return_viscosities : bool, optional\n Also return the preseismic, steady-state, and postseismic viscosities.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n # get indices of each last earthquake in last cycle\n temp = self.eq_df.astype(bool).drop_duplicates(keep=\"last\")\n time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff\n tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last])\n if np.any(tdiff > 0):\n warn(\"Couldn't find exact indices, using time differences of \"\n f\"{tdiff * 365.25 * 86400} seconds.\")\n ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]\n n_eq_found = len(ix_eq_last)\n assert n_eq_found == (self.Ds_0 > 0).sum(), \\\n \"Couldn't find indices of each last non-zero earthquake in the \" \\\n \"last cycle, check for rounding errors.\"\n # 
calculate average slip for plotted earthquakes\n slip_last = self.eq_df.loc[temp.index, :]\n slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean()\n for ieq in range(n_eq_found)]\n # extract preseismic velocities\n vels_pre = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper,\n ix - 1] for ix in ix_eq_last]).T\n vels_post = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper,\n ix] for ix in ix_eq_last]).T\n if isinstance(self.fault.upper_rheo, NonlinearViscous):\n # calculate viscosity profiles\n vis_pre = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),\n self.n_vec.reshape(-1, 1),\n vels_pre)\n vis_ss = SubductionSimulation.get_alpha_eff(self.alpha_n_vec,\n self.n_vec,\n self.v_plate_eff)\n vis_post = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),\n self.n_vec.reshape(-1, 1),\n vels_post)\n elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n vis_pre = SubductionSimulation.get_alpha_eff_from_alpha_h(\n self.alpha_h_vec.reshape(-1, 1), vels_pre)\n vis_ss = SubductionSimulation.get_alpha_eff_from_alpha_h(\n self.alpha_h_vec.reshape(-1, 1), self.v_plate_eff)\n vis_post = SubductionSimulation.get_alpha_eff_from_alpha_h(\n self.alpha_h_vec.reshape(-1, 1), vels_post)\n else:\n raise NotImplementedError()\n vis_mins = 10**np.floor(np.log10(np.ma.masked_invalid(vis_post*0.999).min(axis=0)))\n vis_maxs = 10**np.ceil(np.log10(np.ma.masked_invalid(vis_pre*1.001).max(axis=0)))\n # make plot\n fig, ax = plt.subplots(ncols=n_eq_found, sharey=True, layout=\"constrained\")\n ax = np.atleast_1d(ax)\n ax[0].set_ylabel(\"$x_2$ [km]\")\n for i in range(n_eq_found):\n ax[i].fill_betweenx([0, self.fault.mid_x2_creeping[1] / 1e3],\n vis_mins[i], vis_maxs[i], facecolor=\"0.8\", label=\"Locked\")\n ax[i].fill_betweenx(self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3,\n vis_pre[:, i], vis_post[:, i], alpha=0.5, label=\"Simulated\")\n ax[i].plot(vis_ss,\n self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3,\n label=\"Plate Rate\")\n ax[i].set_xscale(\"log\")\n ax[i].legend(loc=\"lower left\")\n ax[i].set_ylim(self.fault.mid_x2_creeping[self.fault.n_creeping_upper - 1] / 1e3,\n 0)\n ax[i].set_xlim(vis_mins[i], vis_maxs[i])\n ax[i].set_title(f\"$s={slip_avg[i]:.2g}$ m\")\n ax[i].set_xlabel(r\"$\\alpha_{eff}$ [Pa * s/m]\")\n # finish\n if return_viscosities:\n return fig, ax, vis_pre, vis_ss, vis_post\n else:\n return fig, ax\n\n def plot_viscosity_timeseries(self, full_state, return_viscosities=False):\n \"\"\"\n Plot the viscosity timeseries with depth for the entire last cycle.\n\n Parameters\n ----------\n full_state : numpy.ndarray\n State matrix as output from :meth:`~run`.\n return_viscosities : bool, optional\n Also return the viscosity timeseries.\n\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes.Axes\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import LogNorm\n from cmcrameri import cm\n # check that the sequence only has one earthquake\n if not self.n_eq == 1:\n raise NotImplementedError(\"Don't know how to plot viscosity timeseries if \"\n \"multiple earthquakes are present in the sequence.\")\n # get index of last earthquake in last cycle\n time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff\n ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0]\n - self.ix_break_joint[-2])\n # reorganize interseismic velocities\n vels = 
full_state[self.fault.n_creeping_upper:2*self.fault.n_creeping_upper,\n self.ix_break_joint[-2]:]\n vels_pre = vels[:, :ix_eq_last]\n vels_post = vels[:, ix_eq_last:]\n vels = np.hstack([vels_post, vels_pre])\n # same for time\n t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy()\n t_last_pre = t_last[:ix_eq_last]\n t_last_post = t_last[ix_eq_last:]\n t_last_pre += t_last_post[-1] - t_last_pre[0]\n t_last_joint = np.concatenate([t_last_post, t_last_pre])\n t_last_joint -= t_last_joint[0]\n # convert velocities to effective viscosity\n if isinstance(self.fault.upper_rheo, NonlinearViscous):\n vis_ts = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1),\n self.n_vec.reshape(-1, 1),\n vels)\n elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic):\n vis_ts = SubductionSimulation.get_alpha_eff_from_alpha_h(\n self.alpha_h_vec.reshape(-1, 1), vels)\n else:\n raise NotImplementedError()\n # get index of deep transition\n patch_depths = -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]\n ix_deep = np.argmin(np.abs(patch_depths - self.fault.upper_rheo.deep_transition))\n # subset vels to skip zero-velocity uppermost patch\n vis_ts = vis_ts[1:, :]\n # get percentage of final viscosity\n rel_vis = vis_ts / vis_ts[:, -1][:, None]\n rel_vis_masked = np.ma.MaskedArray(rel_vis, np.diff(rel_vis, axis=1,\n prepend=rel_vis[:, 0][:, None]\n ) <= 0).filled(np.NaN)\n levels = [0.2, 0.4, 0.6, 0.8]\n rel_vis_iquant = np.concatenate([np.nanargmax(rel_vis_masked > lvl, axis=1, keepdims=True)\n for lvl in levels], axis=1)\n # normalize time\n t_sub = t_last_joint / self.T_eff\n # prepare plot\n fig, ax = plt.subplots(figsize=(10, 5), layout=\"constrained\")\n # plot velocities\n c = ax.pcolormesh(\n t_sub,\n np.abs(self.fault.end_upper[1, self.fault.n_locked+1:self.fault.n_locked+ix_deep+1]\n / 1e3),\n vis_ts[:ix_deep-1, :-1],\n norm=LogNorm(vmin=10**np.floor(np.log10(np.median(vis_ts[:ix_deep-1, 0]))),\n vmax=10**np.ceil(np.log10(np.max(vis_ts[:ix_deep-1, -1])))),\n cmap=cm.batlow, shading=\"flat\")\n for i in range(len(levels)):\n ax.plot(t_sub[rel_vis_iquant[:ix_deep-1, i]],\n patch_depths[1:ix_deep] / 1e3,\n color=\"w\")\n ax.set_xscale(\"symlog\", linthresh=1e-3)\n ax.set_xlim([0, 1])\n # make the y-axis increasing downwards to mimic depth even though we're plotting x1\n ax.invert_yaxis()\n # finish figure\n ax.set_ylabel(\"Depth $x_2$ [km]\")\n ax.set_xlabel(\"Normalized Time $t/T$\")\n fig.colorbar(c, ax=ax, location=\"right\", orientation=\"vertical\", fraction=0.05,\n label=r\"$\\alpha_{eff}$\")\n fig.suptitle(\"Effective Viscosity Timeseries\")\n # finish\n if return_viscosities:\n return fig, ax, t_sub, vis_ts\n else:\n return fig, ax\n"},"repo_name":{"kind":"string","value":"tobiscode/seqeas-public"},"sub_path":{"kind":"string","value":"seqeas/subduction2d.py"},"file_name":{"kind":"string","value":"subduction2d.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":145621,"string":"145,621"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":386,"cells":{"seq_id":{"kind":"string","value":"21721374854"},"text":{"kind":"string","value":"import os\r\nimport math\r\nimport json\r\nimport librosa\r\n\r\nfrom settings import (\r\n SAMPLE_RATE,\r\n NUM_MFCC,\r\n N_FTT,\r\n HOP_LENGTH,\r\n NUM_SEGMENTS,\r\n 
DURATION,\r\n)\r\n\r\nDATASET_PATH = \"data\\\\archive\\\\Data\\\\genres_original\" # loaded using the GTZAN Music Genre Classification dataset at https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification\r\nJSON_PATH = \"data\\\\data.json\"\r\n\r\nSAMPLES_PER_TRACK = SAMPLE_RATE * DURATION\r\n\r\ndef dump_mfccs_to_json(dataset_path=None):\r\n \"\"\"\r\n Processes test data as MFCCs and labels\r\n \"\"\"\r\n dataset_path = dataset_path if dataset_path is not None else DATASET_PATH\r\n data = {\r\n \"mapping\": [],\r\n \"mfcc\": [],\r\n \"labels\" : [],\r\n }\r\n samples_per_segment = int(SAMPLES_PER_TRACK/NUM_SEGMENTS)\r\n expected_mfcc = math.ceil(samples_per_segment/HOP_LENGTH)\r\n for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):\r\n if dirpath is not dataset_path:\r\n dirpath_components = dirpath.split(\"\\\\\")\r\n label = dirpath_components[-1]\r\n data[\"mapping\"].append(label)\r\n print(f\"Processing: {label}\")\r\n\r\n for f in filenames:\r\n file_path = os.path.join(dirpath, f)\r\n signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)\r\n\r\n for s in range(NUM_SEGMENTS):\r\n start_sample = samples_per_segment * s\r\n finish_sample = start_sample + samples_per_segment\r\n mfcc = librosa.feature.mfcc(signal[start_sample:finish_sample], sr=sr, n_fft=N_FTT, n_mfcc=NUM_MFCC, hop_length=HOP_LENGTH)\r\n mfcc = mfcc.T\r\n if len(mfcc) == expected_mfcc:\r\n data[\"mfcc\"].append(mfcc.tolist())\r\n data[\"labels\"].append(i-1)\r\n print(f\"{file_path}, segment:{s+1}\")\r\n\r\n with open(JSON_PATH, \"w\") as fp:\r\n json.dump(data, fp, indent=4)\r\n\r\nif __name__ == \"__main__\":\r\n dump_mfccs_to_json()"},"repo_name":{"kind":"string","value":"jmrossi98/genre_detect"},"sub_path":{"kind":"string","value":"src/preprocess_data.py"},"file_name":{"kind":"string","value":"preprocess_data.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2051,"string":"2,051"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":387,"cells":{"seq_id":{"kind":"string","value":"42111163390"},"text":{"kind":"string","value":"from fastapi import Body, FastAPI\nfrom pydantic import BaseModel\nfrom typing import Annotated\n\nfrom enum import Enum\n\napp = FastAPI()\n\n\nclass ModelName(str, Enum):\n afs = \"afs\"\n har = \"har1\"\n\n\nclass Item(BaseModel):\n name: str\n description: str | None = None\n price: float\n tax: float | None = None\n tags: set[str] = set()\n\n\nfake_items_db = [{\"item_name\": \"Foo\"}, {\"item_name\": \"Bar\"}, {\"item_name\": \"Baz\"}]\n\n\n@app.post(\"/items/create_item/\")\nasync def create_items(item: Item):\n item_dict = item.model_dump()\n if item.tax:\n price_with_tax = item.price + item.tax\n item_dict.update({\"price with tax\": price_with_tax})\n return item_dict\n\n\n@app.get(\"/\")\nasync def home():\n return {\"Data\": \"Test\"}\n\n\n@app.get(\"/items/\")\nasync def read_item(skip: int = 0, limit: int = 10):\n return fake_items_db[skip: skip + limit]\n\n\n@app.put(\"/add_items/{item_id}\")\nasync def add_item(item_id: int, item: Item):\n return {\"item_id\": item_id, **item.model_dump()}\n\n\n@app.put(\"/items/{item_id}\")\nasync def update_item(item_id: int, item: Annotated[Item, Body(examples={\"name\": \"foo\", \"description\": \"cool item\", \"price\": 
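
Editor's aside (hypothetical helper, not part of the original preprocess_data.py above): reading the JSON written by dump_mfccs_to_json back into numpy arrays for training.

import json
import numpy as np

def load_mfcc_dataset(json_path="data/data.json"):
    # Read the file produced by dump_mfccs_to_json and return arrays plus the genre mapping
    with open(json_path, "r") as fp:
        data = json.load(fp)
    X = np.array(data["mfcc"])       # shape: (n_segments, n_frames, n_mfcc)
    y = np.array(data["labels"])     # integer genre indices into data["mapping"]
    return X, y, data["mapping"]

# Example usage (assumes the JSON file has already been generated):
# X, y, mapping = load_mfcc_dataset()
# print(X.shape, y.shape, mapping[y[0]])
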
\"24\", \"tax\": 3})]):\n result = {\"item_id\": item_id, \"item\": item}\n return result\n\n\n@app.get(\"/models/{model_name}\")\nasync def get_model(model_name: ModelName):\n if model_name is ModelName.afs:\n return {\"model_name\": model_name, \"message\": 1}\n if model_name.value == \"har\":\n return {\"model_name\": model_name, \"message\": 2}\n return {\"model_name\": model_name, \"message\": -1}\n\n\n@app.get(\"/files/{file_path:path}\")\nasync def read_file(file_path: str):\n return {\"file_path\": file_path}\n"},"repo_name":{"kind":"string","value":"mkilic20/task"},"sub_path":{"kind":"string","value":"testing.py"},"file_name":{"kind":"string","value":"testing.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1663,"string":"1,663"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":388,"cells":{"seq_id":{"kind":"string","value":"2987884048"},"text":{"kind":"string","value":"from urllib2 import urlopen, HTTPError\nfrom django.template.defaultfilters import slugify\nfrom django.core.files.base import ContentFile\nfrom django.db import transaction, IntegrityError\n\nfrom item.models import Item, Link\nfrom movie.models import Movie, Actor, Director, Genre\nfrom decorators.retry import retry\n\nclass LoadMovie():\n \"\"\"\n This manager inserts a movie into the database along with its\n corresponding genres, actors, and directors.\n \"\"\"\n exists = False\n \n def __init__(self, title, imdb_id, runtime,\n synopsis, theater_date, keywords):\n \"\"\"\n Inserts the movie into the database if it doesn't already\n exist in the database.\n \"\"\"\n try:\n self.movie, self.created = Movie.objects.get_or_create(\n title=title,\n imdb_id=imdb_id,\n runtime=runtime,\n synopsis=synopsis,\n theater_date=theater_date,\n keywords = keywords,\n url=slugify(title)\n )\n except IntegrityError:\n print('TRANSACTION FAILED ON MOVIE INSERT: Rolling back now...')\n transaction.rollback()\n\n def insert_genres(self, genres):\n \"\"\"\n Inserts the genres for the movie.\n \"\"\"\n genre_list = []\n try:\n for g in genres:\n genre, created = Genre.objects.get_or_create(\n name=g, url=slugify(g))\n genre_list.append(genre)\n self.movie.genre.add(*genre_list)\n except IntegrityError:\n print('TRANSACTION FAILED ON GENRE INSERT: Rolling back now...')\n transaction.rollback()\n\n def insert_actors(self, actors):\n \"\"\"\n Inserts the actors for the movie.\n \"\"\"\n actor_list = []\n try:\n for a in actors:\n actor, created = Actor.objects.get_or_create(\n name=a, url=slugify(a))\n actor_list.append(actor)\n self.movie.actors.add(*actor_list)\n except IntegrityError:\n print('TRANSACTION FAILED ON ACTOR INSERT: Rolling back now...')\n transaction.rollback()\n \n\n def insert_directors(self, directors):\n \"\"\"\n Inserts the directors for the movie.\n \"\"\"\n director_list = []\n try:\n for d in directors:\n director, created = Director.objects.get_or_create(\n name=d, url=slugify(d))\n director_list.append(director)\n self.movie.directors.add(*director_list)\n except IntegrityError:\n print('TRANSACTION FAILED ON DIRECTOR INSERT: Rolling back now...')\n transaction.rollback()\n\n @retry(HTTPError)\n def insert_image(self, url):\n \"\"\"\n Inserts the image for the movie.\n \"\"\"\n try:\n if 'default.jpg' in self.movie.image.url or 
self.created:\n image = urlopen(url, timeout=15)\n self.movie.image.save(\n self.movie.url+u'.jpg',\n ContentFile(image.read())\n )\n except IntegrityError:\n print('TRANSACTION FAILED ON IMAGE INSERT: Rolling back now...')\n transaction.rollback()\n\n def insert_trailer(self, url):\n \"\"\"\n Inserts the trailer as a link.\n \"\"\"\n try:\n Link.objects.get_or_create(\n item=self.movie.item,\n partner=\"YouTube\",\n url=url\n )\n except IntegrityError:\n print('TRANSACTION FAILED ON TRAILER INSERT: Rolling back now...')\n transaction.rollback()\n"},"repo_name":{"kind":"string","value":"sameenjalal/mavenize-beta"},"sub_path":{"kind":"string","value":"mavenize/lib/db/loadmovie.py"},"file_name":{"kind":"string","value":"loadmovie.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3712,"string":"3,712"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":389,"cells":{"seq_id":{"kind":"string","value":"11332000472"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 1 10:10:45 2021\n\n@author: 82106\n\"\"\"\n\nimport cv2\nimport os\nimport sys\n\nif not os.path.exists('result'):\n os.makedirs('result')\n\ncapture = cv2.VideoCapture(1)\n\nif not capture.isOpened():\n print('Camera open failed!')\n sys.exit()\n\n'''\nframeWidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nframeHeight = int(capture.get(cv2.CAP_PROP_FRMAE_HEIGHT))\nframeSize = (frameWidth, frameHeight)\nprint('frame size : {}'.format(frameSize))\n'''\n\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\ncount = 1\n\nwhile True:\n ret, frame = capture.read()\n \n if not ret:\n print('Frame read error!')\n sys.exit()\n \n cv2.imshow('frame', frame)\n \n key = cv2.waitKey(1)\n if key == ord('s'):\n print('Screenshot saved!')\n cv2.imwrite('result/screenshot{}.png'.format(count), frame, params=[cv2.IMWRITE_PNG_COMPRESSION, 0])\n count += 1\n \n elif key == ord('q'):\n break\n \ncapture.release()\ncv2.destroyAllWindows()\n \n \n \n \n"},"repo_name":{"kind":"string","value":"dongwooky/Personal-Project"},"sub_path":{"kind":"string","value":"container/camera_screenshot.py"},"file_name":{"kind":"string","value":"camera_screenshot.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1084,"string":"1,084"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":390,"cells":{"seq_id":{"kind":"string","value":"5759183851"},"text":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@AUTHOR:Joselyn Zhao\n@CONTACT:zhaojing17@foxmail.com\n@HOME_PAGE:joselynzhao.top\n@SOFTWERE:PyCharm\n@FILE:main.py\n@TIME:2019/6/13 10:32\n@DES:\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nold_v = tf.logging.get_verbosity()\ntf.logging.set_verbosity(tf.logging.ERROR)\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom Lenet import *\nfrom PIL import Image\n\nmnist = input_data.read_data_sets('../../../data/mnist', one_hot=True)\nx_test = np.reshape(mnist.test.images, [-1, 28, 28, 1])\nx_test = 
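
Editor's aside: the decorators.retry module imported by loadmovie.py above is not included in this dump; the sketch below shows one plausible shape for a retry(ExceptionType) decorator matching the @retry(HTTPError) call site. Names, defaults, and behavior are assumptions, not the project's actual implementation.

import time
from functools import wraps

def retry(exc_type, tries=3, delay=1):
    """Retry the wrapped function when exc_type is raised (illustrative sketch only)."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            last_error = None
            for _ in range(tries):
                try:
                    return func(*args, **kwargs)
                except exc_type as err:
                    last_error = err
                    time.sleep(delay)
            raise last_error
        return wrapper
    return decorator

# Usage mirroring the call site above:
# @retry(HTTPError)
# def insert_image(self, url): ...
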
np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)),\n 'constant') # print(\"Updated Image Shape: {}\".format(X_train[0].shape))\ntf.logging.set_verbosity(old_v)\n\n\niteratons = 1000\nbatch_size = 64\nma = 0\nsigma = 0.1\nlr = 0.01\n\n\ndef get_sample100(label):\n sample100_x=[]\n sample100_y=[]\n count = 0\n for i in range(len(mnist.test.images)):\n if mnist.test.labels[i][label]==1:\n count+=1\n sample100_y.append(mnist.test.labels[i])\n sample100_x.append(mnist.test.images[i])\n if count>=100:\n break\n return sample100_x,sample100_y\n\n\ndef train_lenet(lenet):\n with tf.Session() as sess: #这个session需要关闭么?\n sess.run(tf.global_variables_initializer())\n\n tf.summary.image(\"input\",lenet.x,3)\n merged_summary = tf.summary.merge_all()\n\n writer = tf.summary.FileWriter(\"LOGDIR/4/\",sess.graph) # 保存到不同的路径下\n # writer.add_graph(sess.graph)\n for ii in range(iteratons):\n batch_xs,batch_ys = mnist.train.next_batch(batch_size)\n batch_xs = np.reshape(batch_xs,[-1,28,28,1])\n batch_xs = np.pad(batch_xs,((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')\n sess.run(lenet.train_step,feed_dict ={lenet.x:batch_xs,lenet.y_:batch_ys})\n if ii % 50 == 1:\n acc,s = sess.run([lenet.accuracy,merged_summary],feed_dict ={lenet.x:x_test,lenet.y_:mnist.test.labels})\n writer.add_summary(s,ii)\n print(\"%5d: accuracy is: %4f\" % (ii, acc))\n sample100_x,sample100_y = get_sample100(4) #随便选了一个label 输入0-9的值\n sample100_x = np.reshape(sample100_x,[-1,28,28,1])\n sample100_x = np.pad(sample100_x, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')\n x_min = tf.reduce_min(lenet.fc2)\n x_max = tf.reduce_max(lenet.fc2)\n fc2 = (lenet.fc2 - x_min) / (x_max - x_min)\n fc2 = sess.run(fc2,feed_dict={lenet.x:sample100_x,lenet.y_:sample100_y})\n plt.imshow(fc2)\n plt.show()\n\n print('[accuracy,loss]:', sess.run([lenet.accuracy], feed_dict={lenet.x:x_test,lenet.y_:mnist.test.labels}))\n\n\nif __name__ ==\"__main__\":\n act = \"sigmoid\"\n lenet = Lenet(ma,sigma,lr,act)\n train_lenet(lenet)"},"repo_name":{"kind":"string","value":"joselynzhao/DeepLearning.Advanceing"},"sub_path":{"kind":"string","value":"DL_6/work/main.py"},"file_name":{"kind":"string","value":"main.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2860,"string":"2,860"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":5,"string":"5"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":391,"cells":{"seq_id":{"kind":"string","value":"44602770515"},"text":{"kind":"string","value":"import pytesseract\nimport PIL\nfrom os import system\nimport re\nsystem(\"tesseract -l\")\n\nclass workout:\n reps = 0\n exercise_name = \"\"\n\n\ndef compile_text_to_workouts(text):\n workouts = []\n num = 0\n\n \n for word in text:\n new_workout = workout()\n if word.isdigit():\n new_workout.reps = word\n num+=1\n while num < len(text) and not text[num].isdigit() :\n new_workout.exercise_name += \" \" + str(text[num]) \n num +=1\n if not new_workout.reps == 0 or not new_workout.exercise_name == \"\":\n workouts.append(new_workout)\n\n return workouts\n\n\n####MAIN:############################################################### \n\nletters = (pytesseract.image_to_string(r'../GetFit/workout_routine1.png'))\nprint(letters)\nsentence = re.findall(r'\\w+', letters) ##turns letters into words and makes list\nprint(sentence)\ncompile_text_to_workouts(sentence) ###turns into actual workout 
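
Editor's aside (synthetic shapes only): the reshape-and-zero-pad step in the LeNet script above turns flattened 28x28 MNIST images into the 32x32 inputs the network expects; a quick stand-alone check of that pattern.

import numpy as np

batch = np.random.rand(64, 784)                       # fake flattened MNIST batch
batch = np.reshape(batch, [-1, 28, 28, 1])            # restore image shape
batch = np.pad(batch, ((0, 0), (2, 2), (2, 2), (0, 0)), "constant")
print(batch.shape)                                    # -> (64, 32, 32, 1)
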
routine\n"},"repo_name":{"kind":"string","value":"reeyagup/GetFit"},"sub_path":{"kind":"string","value":"image_to_text.py"},"file_name":{"kind":"string","value":"image_to_text.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":972,"string":"972"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":392,"cells":{"seq_id":{"kind":"string","value":"35299316629"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport xml.etree.ElementTree as ET\nfrom xml.etree import ElementTree as etree\nfrom xml.dom import minidom\nimport untangle\n\ndef xml_generator(input_filename, input_foldername, exif_list, root_path):\n root = ET.Element('annotation')\n \n source = ET.SubElement(root, 'source')\n image_date = ET.SubElement(source, 'date')\n image_date.text = str(exif_list[0])\n folder_name = ET.SubElement(source, 'folder')\n folder_name.text = input_foldername\n file_name = ET.SubElement(source, 'filename')\n file_name.text = input_filename\n\n gpsinfo = ET.SubElement(root, 'gpsinfo')\n gps_altitude = ET.SubElement(gpsinfo, 'GPSAltitude')\n gps_altitude.text = str(exif_list[1])\n gps_latitude = ET.SubElement(gpsinfo, 'GPSLatitude')\n gps_latitude.text = str(exif_list[2])\n gps_latitude_ref = ET.SubElement(gpsinfo, 'GPSLatitudeRef')\n gps_latitude_ref.text = str(exif_list[3])\n gps_longitude = ET.SubElement(gpsinfo, 'GPSLongitude')\n gps_longitude.text = str(exif_list[4])\n gps_longitude_ref = ET.SubElement(gpsinfo, 'GPSLongitudeRef')\n gps_longitude_ref.text = str(exif_list[5])\n \n '''\n There should be position annotation inside 'object' tag\n '''\n #ann_obj = ET.SubElement(root, 'object')\n \n xml_string = etree.tostring(root)\n tree = minidom.parseString(xml_string)\n xml_string = tree.toxml()\n \n save_path = '%s/ob_%s/%s.xml' % (root_path, input_foldername, input_filename[:-4])\n \n f=open(save_path,'wb')\n f.write(tree.toprettyxml(encoding='utf-8'))\n f.close()\n\ndef xml_parsing(input_xml_file):\n obj = untangle.parse(input_xml_file)\n \n date_time = obj.annotation.source.date.cdata\n GPSAltitude = obj.annotation.gpsinfo.GPSAltitude.cdata\n GPSLatitude = obj.annotation.gpsinfo.GPSLatitude.cdata\n GPSLatitudeRef = obj.annotation.gpsinfo.GPSLatitudeRef.cdata\n GPSLongitude = obj.annotation.gpsinfo.GPSLongitude.cdata\n GPSLongitudeRef = obj.annotation.gpsinfo.GPSLongitudeRef.cdata\n \n xml_info_keys = ['Date', 'GPSAltitude', 'GPSLatitude', 'GPSLatitudeRef', 'GPSLongitude', 'GPSLongitudeRef']\n xml_info_value = [date_time, GPSAltitude, GPSLatitude, GPSLatitudeRef, GPSLongitude, GPSLongitudeRef]\n xml_info_dict = dict(zip(xml_info_keys, xml_info_value))\n return xml_info_dict\n\n#im = 
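
Editor's aside (no image or tesseract needed; this is a simplified reimplementation, not the original compile_text_to_workouts): the regex tokenization and rep/exercise grouping used in image_to_text.py above, run on a hard-coded OCR-style string.

import re

text = "10 push ups 15 squats 20 sit ups"
words = re.findall(r"\w+", text)          # -> ['10', 'push', 'ups', '15', ...]

workouts = []
i = 0
while i < len(words):
    if words[i].isdigit():                # a number starts a new workout entry
        reps = int(words[i])
        i += 1
        name_parts = []
        while i < len(words) and not words[i].isdigit():
            name_parts.append(words[i])
            i += 1
        workouts.append((reps, " ".join(name_parts)))
    else:
        i += 1

print(workouts)   # -> [(10, 'push ups'), (15, 'squats'), (20, 'sit ups')]
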
'/Users/xiang/ml_ann/ann_tools_eric/dataset/ob_curr/00001.xml'\n#xml_parsing(im)\n"},"repo_name":{"kind":"string","value":"simonchanper/ml_ann"},"sub_path":{"kind":"string","value":"ann_tools_eric/xml_process.py"},"file_name":{"kind":"string","value":"xml_process.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2403,"string":"2,403"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":393,"cells":{"seq_id":{"kind":"string","value":"19699008636"},"text":{"kind":"string","value":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n d={}\n while headA:\n d[headA] = 1\n headA = headA.next\n\n while headB:\n if headB in d:\n return headB\n headB=headB.next\n\n# the idea is to traverse list A and store the address/reference to each node\n# in a hash set. Then check every node bi in list B: if bi appears in the hash set,\n# then bi is the intersection node.\n\n# I did not realize that the hash set can be created like this\n"},"repo_name":{"kind":"string","value":"Superhzf/python_exercise"},"sub_path":{"kind":"string","value":"Linked List/Intersection of Two Linked Lists/solution.py"},"file_name":{"kind":"string","value":"solution.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":692,"string":"692"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":394,"cells":{"seq_id":{"kind":"string","value":"19516521842"},"text":{"kind":"string","value":"#for X in range(1,10):\n #print(X)\n\n#for char in \"cofee\":\n #print(char * 10)\n\n#for num in range (0,20,2):#if you start with odd nums it will print odd(1,20,2)\n #print(num)\n\n#times = input(\"how many times do i have to tell you? 
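
Editor's aside (self-contained re-run of the hash-set idea in the linked-list solution above; ListNode is redefined here only because LeetCode normally supplies it).

class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def get_intersection_node(headA, headB):
    seen = set()                      # store visited nodes of list A by identity
    while headA:
        seen.add(headA)
        headA = headA.next
    while headB:
        if headB in seen:             # first shared node is the intersection
            return headB
        headB = headB.next
    return None

# Build two lists that share a tail: 1 -> 2 -> 8 -> 9 and 5 -> 8 -> 9
shared = ListNode(8); shared.next = ListNode(9)
a = ListNode(1); a.next = ListNode(2); a.next.next = shared
b = ListNode(5); b.next = shared
print(get_intersection_node(a, b).val)   # -> 8
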
\") \n#times = int(times)\n#for time in range(times) :\n # print (\"clean up your room!\") \n\n\n#for num in range(1,21):\n #if num ==4 or num ==13:\n #print(f\"{num} is unlucky\")\n #elif num % 2==0:\n #print(f\"{num} is even\") \n # else:\n #print(f\"{num} is odd\")\n\n\n\n#for num in range(1,21):\n #if num ==4 or num ==13:\n #print(f\"{num} is unlucky\")\n #elif num % 2==0:\n #print(f\"{num} is even\") \n #else:\n #state() \n #print(f\"{num} is odd\") \n\n\n\n#while loop\n#msg = input(\"whats your password?\")\n#while msg != \"bananas\":\n #print(\"wrong!\")\n #msg = input(\"whats your password?\") \n#print(\"correct!\") \n\n#num =1\n#while num < 11:\n #print(num)\n #num += 1 \n\n\n#for num in range(1,11) :\n #print(\" \\U0001f600\" * num)\n\n#times = 1\n#while times < 11:\n #print(\" \\U0001f600\" * times) \n #times += 1 \n\n\n#msg = input(\"say something: \") \n#while msg != \"stop copying me\":\n #print(msg)\n #msg = input() \n#print(\"you win!\") \n\n\nwhile True:\n command = input(\"type 'exit' to exit:\")\n if (command == \"exit\"):\n break\n\n\n\n\ntimes = int(input(\"how many times do i have to tell yah?\"))\nfor time in range(times):\n print(\"claen your room!\")\n if time >= 3:\n print(\"do you even listen anymore\")\n break\n\n\n "},"repo_name":{"kind":"string","value":"mevine/seen"},"sub_path":{"kind":"string","value":"jee.py"},"file_name":{"kind":"string","value":"jee.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1511,"string":"1,511"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":395,"cells":{"seq_id":{"kind":"string","value":"71504118267"},"text":{"kind":"string","value":"from __future__ import annotations\n\nfrom io import BufferedIOBase, BytesIO\nfrom typing import List, Optional\n\nfrom helper import (\n byte_to_int,\n encode_varstr,\n hash160,\n int_to_byte,\n int_to_little_endian,\n little_endian_to_int,\n read_varint,\n sha256,\n)\nfrom op import (\n decode_num,\n encode_minimal_num,\n is_number_op_code,\n number_to_op_code,\n op_code_to_number,\n OP_0,\n OP_CHECKLOCKTIMEVERIFY,\n OP_CHECKMULTISIG,\n OP_CHECKMULTISIGVERIFY,\n OP_CHECKSEQUENCEVERIFY,\n OP_CHECKSIG,\n OP_CHECKSIGVERIFY,\n OP_DROP,\n OP_DUP,\n OP_EQUAL,\n OP_EQUALVERIFY,\n OP_FROMALTSTACK,\n OP_HASH160,\n OP_IF,\n OP_NOTIF,\n OP_PUSHDATA1,\n OP_PUSHDATA2,\n OP_TOALTSTACK,\n OP_VERIFY,\n OP_CODE_NAMES,\n OP_CODE_FUNCTIONS,\n)\nfrom timelock import Locktime, Sequence\nfrom witness import Witness\n\n\nclass Script(list):\n def __add__(self, other: Script) -> Script:\n return self.__class__(super().__add__(other))\n\n def __radd__(self, other: Script) -> Script:\n o = self.__class__(other)\n return o + self\n\n def __new__(cls,\n commands: Optional[List[Union(bytes, str)]] = None) -> Script:\n if commands is None:\n commands = []\n for current in commands:\n if type(current) not in (bytes, ):\n raise ValueError(\n f'Every command should be bytes or str, got {current} instead'\n )\n return super().__new__(cls, commands)\n\n def __repr__(self) -> str:\n result = ''\n for current in self:\n if OP_CODE_NAMES.get(current):\n result += f'{OP_CODE_NAMES[current]} '\n elif type(current) == str:\n result += f'<{current}> '\n else:\n result += f'{current.hex()} '\n return result\n\n @classmethod\n def parse(cls, s: BufferedIOBase) -> Script:\n 
# get the length of the entire field\n length = read_varint(s)\n # initialize the commands array\n commands = []\n # initialize the number of bytes we've read to 0\n count = 0\n # loop until we've read length bytes\n while count < length:\n # get the current byte\n current = s.read(1)\n # increment the bytes we've read\n count += 1\n # convert the current byte to an integer\n current_int = current[0]\n # if the current byte is between 1 and 75 inclusive\n if current_int <= 75:\n # add the next n bytes as a command\n commands.append(s.read(current_int))\n count += current_int\n elif current == OP_PUSHDATA1:\n # op_pushdata1\n data_length = byte_to_int(s.read(1))\n commands.append(s.read(data_length))\n count += data_length + 1\n elif current == OP_PUSHDATA2:\n # op_pushdata2\n data_length = little_endian_to_int(s.read(2))\n commands.append(s.read(data_length))\n count += data_length + 2\n else:\n # add the command to the list of commands\n commands.append(current)\n if count != length:\n raise SyntaxError(f'parsing script failed {commands}')\n return cls(commands)\n\n def miniscript(self):\n from miniscript import MiniScript\n return MiniScript.from_script(Script(self[:]))\n\n def is_locktime_locked(self) -> bool:\n '''Returns whether the script starts with\n OP_CLTV OP_DROP'''\n return len(self) >= 3 and \\\n (is_number_op_code(self[0]) or len(self[0]) > 1) and \\\n self[1] == OP_CHECKLOCKTIMEVERIFY and self[2] == OP_DROP\n\n def is_multisig(self) -> bool:\n '''Returns whether the script follows the\n OP_k ... OP_n OP_CHECKMULTISIG pattern'''\n if self[-1] != OP_CHECKMULTISIG:\n return False\n if not is_number_op_code(self[-2]):\n return False\n n = op_code_to_number(self[-2])\n if len(self) < n + 3:\n return False\n for current in self[-n - 2:-2]:\n if len(current) != 33:\n return False\n if not is_number_op_code(self[-n - 3]):\n return False\n k = op_code_to_number(self[-n - 3])\n if k < 1 or k > 15:\n return False\n if n < k or n > 15:\n return False\n return True\n\n def is_multisig_timelock(self) -> bool:\n '''Returns whether the script follows the\n OP_CLTV/OP_CSV OP_DROP OP_k ... 
OP_n OP_CHECKMULTISIG pattern'''\n return (self.is_sequence_locked() or self.is_locktime_locked()) and \\\n self.is_multisig()\n\n def is_p2pkh(self) -> bool:\n '''Returns whether the script follows the\n OP_DUP OP_HASH160 <20 byte hash> OP_EQUALVERIFY OP_CHECKSIG pattern.'''\n # there should be exactly 5 commands\n # OP_DUP, OP_HASH160, 20-byte hash, OP_EQUALVERIFY, OP_CHECKSIG\n return len(self) == 5 and self[0] == OP_DUP and self[1] == OP_HASH160 \\\n and len(self[2]) == 20 and self[3] == OP_EQUALVERIFY \\\n and self[4] == OP_CHECKSIG\n\n def is_p2sh(self) -> bool:\n '''Returns whether the script follows the\n OP_HASH160 <20 byte hash> OP_EQUAL pattern.'''\n # there should be exactly 3 commands\n # OP_HASH160, 20-byte hash, OP_EQUAL\n return len(self) == 3 and self[0] == OP_HASH160 and len(self[1]) == 20 \\\n and self[2] == OP_EQUAL\n\n def is_p2wpkh(self) -> bool:\n '''Returns whether the script follows the\n OP_0 <20 byte hash> pattern.'''\n return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 20\n\n def is_p2wsh(self) -> bool:\n '''Returns whether the script follows the\n OP_0 <32 byte hash> pattern.'''\n return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 32\n\n def is_segwit(self) -> bool:\n return self.is_p2wpkh() or self.is_p2wsh()\n\n def is_sequence_locked(self) -> bool:\n '''Returns whether the script starts with\n OP_CSV OP_DROP'''\n return len(self) >= 3 and \\\n (is_number_op_code(self[0]) or len(self[0]) > 1) and \\\n self[1] == OP_CHECKSEQUENCEVERIFY and self[2] == OP_DROP\n\n def is_timelock(self) -> bool:\n '''Returns whether the script follows the\n locktime OP_CLTV OP_DROP OP_CHECKSIG pattern'''\n return (self.is_sequence_locked() or self.is_locktime_locked()) and \\\n len(self) == 5 and len(self[3]) == 33 and self[4] == OP_CHECKSIG\n\n def pubkeys(self) -> List[bytes]:\n pubkeys = []\n for item in self:\n if len(item) == 33 and item[0] in (2, 3):\n pubkeys.append(item)\n return pubkeys\n\n def raw_serialize(self) -> bytes:\n # initialize what we'll send back\n result = b''\n # go through each command\n for current in self:\n if current == OP_0:\n result += int_to_byte(0)\n elif OP_CODE_NAMES.get(current) is None:\n # this is an element\n # get the length in bytes\n length = len(current)\n # for large lengths, we have to use a pushdata op code\n if length < 75:\n # turn the length into a single byte integer\n result += int_to_byte(length)\n elif length > 75 and length < 0x100:\n # 76 is pushdata1\n result += OP_PUSHDATA1\n result += int_to_byte(length)\n elif length >= 0x100 and length <= 520:\n # 77 is pushdata2\n result += OP_PUSHDATA2\n result += int_to_little_endian(length, 2)\n else:\n raise ValueError('too long a command')\n result += current\n return result\n\n def serialize(self) -> bytes:\n return encode_varstr(self.raw_serialize())\n\n\nclass ScriptPubKey(Script):\n '''Represents a ScriptPubKey in a transaction'''\n @classmethod\n def parse(cls, s: BufferedIOBase) -> ScriptPubKey:\n script_pubkey = super().parse(s)\n if script_pubkey.is_p2pkh():\n return PKHScriptPubKey.from_hash(script_pubkey[2])\n elif script_pubkey.is_p2sh():\n return SHScriptPubKey.from_hash(script_pubkey[1])\n elif script_pubkey.is_p2wpkh():\n return WPKHScriptPubKey.from_hash(script_pubkey[1])\n elif script_pubkey.is_p2wsh():\n return WSHScriptPubKey.from_hash(script_pubkey[1])\n else:\n return script_pubkey\n\n def redeem_script(self) -> RedeemScript:\n '''Convert this ScriptPubKey to its RedeemScript equivalent'''\n return RedeemScript(self)\n\n\nclass 
PKHScriptPubKey(ScriptPubKey):\n @classmethod\n def from_hash(cls, h160: bytes) -> PKHScriptPubKey:\n if len(h160) != 20:\n raise TypeError('h160 should be 20 bytes')\n return cls([OP_DUP, OP_HASH160, h160, OP_EQUALVERIFY, OP_CHECKSIG])\n\n def hash160(self) -> bytes:\n return self[2]\n\n\nclass SHScriptPubKey(ScriptPubKey):\n @classmethod\n def from_hash(cls, h160: bytes) -> SHScriptPubKey:\n if len(h160) != 20:\n raise TypeError('h160 should be 20 bytes')\n return cls([OP_HASH160, h160, OP_EQUAL])\n\n def hash160(self) -> bytes:\n return self[1]\n\n\nclass RedeemScript(Script):\n '''Subclass that represents a RedeemScript for p2sh'''\n def hash160(self) -> bytes:\n '''Returns the hash160 of the serialization of the RedeemScript'''\n return hash160(self.raw_serialize())\n\n def script_pubkey(self) -> SHScriptPubKey:\n '''Returns the ScriptPubKey that this RedeemScript corresponds to'''\n return SHScriptPubKey.from_hash(self.hash160())\n\n\nclass SegwitPubKey(ScriptPubKey):\n def hash(self) -> bytes:\n return self[1]\n\n\nclass WPKHScriptPubKey(SegwitPubKey):\n @classmethod\n def from_hash(cls, h160: bytes) -> WPKHScriptPubKey:\n if len(h160) != 20:\n raise TypeError('h160 should be 20 bytes')\n return cls([OP_0, h160])\n\n\nclass WSHScriptPubKey(SegwitPubKey):\n @classmethod\n def from_hash(cls, s256: bytes) -> WSHScriptPubKey:\n if len(s256) != 32:\n raise TypeError('s256 should be 32 bytes')\n return cls([OP_0, s256])\n\n\nclass WitnessScript(Script):\n '''Subclass that represents a WitnessScript for p2wsh'''\n def redeem_script(self) -> RedeemScript:\n return self.script_pubkey().redeem_script()\n\n def script_pubkey(self) -> WSHScriptPubKey:\n '''Generates the ScriptPubKey for p2wsh'''\n # get the sha256 of the current script\n # return new p2wsh script using p2wsh_script\n return WSHScriptPubKey.from_hash(self.sha256())\n\n def sha256(self) -> bytes:\n '''Returns the sha256 of the raw serialization for witness program'''\n return sha256(self.raw_serialize())\n\n\nclass MultiSigScript(Script):\n @classmethod\n def from_pubkeys(cls, k: int, sec_pubkeys: List[bytes]) -> MultiSigScript:\n n = len(sec_pubkeys)\n if k == 0 or k > n:\n raise ValueError(f'cannot do {k} of {n} keys')\n return cls([\n number_to_op_code(k), *sorted(sec_pubkeys),\n number_to_op_code(n), OP_CHECKMULTISIG\n ])\n\n\nclass MultiSigRedeemScript(RedeemScript, MultiSigScript):\n pass\n\n\nclass MultiSigWitnessScript(WitnessScript, MultiSigScript):\n pass\n\n\nclass TimelockScript(Script):\n @classmethod\n def from_time(cls,\n locktime: Optional[Locktime] = None,\n sequence: Optional[Sequence] = None) -> List[bytes]:\n if locktime is not None:\n return [\n encode_minimal_num(locktime), OP_CHECKLOCKTIMEVERIFY, OP_DROP\n ]\n elif sequence is not None:\n return [\n encode_minimal_num(sequence), OP_CHECKSEQUENCEVERIFY, OP_DROP\n ]\n else:\n raise ValueError('locktime or sequence required')\n\n\nclass SingleSigTimelockScript(TimelockScript):\n @classmethod\n def from_pubkey_time(\n cls,\n sec: bytes,\n locktime: Optional[Locktime] = None,\n sequence: Optional[Sequence] = None) -> SingleSigTimelockScript:\n script = cls.from_time(locktime, sequence) + [sec, OP_CHECKSIG]\n return cls(script)\n\n\nclass SingleSigTimelockRedeemScript(RedeemScript, SingleSigTimelockScript):\n pass\n\n\nclass SingleSigTimelockWitnessScript(WitnessScript, SingleSigTimelockScript):\n pass\n\n\nclass MultiSigTimelockScript(TimelockScript, MultiSigScript):\n @classmethod\n def from_pubkeys_time(\n cls,\n k: int,\n sec_pubkeys: List[bytes],\n locktime: 
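
Editor's aside (usage sketch only; the import path is assumed from the metadata, and the 20-byte hash is a dummy value, not a real pubkey hash): constructing a P2PKH ScriptPubKey with the classes defined in script.py above.

# assumes this repo's modules (script.py, op.py, helper.py) are importable
from script import PKHScriptPubKey

h160 = bytes(20)                    # placeholder 20-byte pubkey hash for illustration
spk = PKHScriptPubKey.from_hash(h160)
print(spk)                          # OP_DUP OP_HASH160 <hash hex> OP_EQUALVERIFY OP_CHECKSIG
print(spk.is_p2pkh())               # -> True
print(spk.hash160() == h160)        # -> True
# spk.serialize() would prepend the varint length prefix for use inside a transaction
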
Optional[Locktime] = None,\n sequence: Optional[Sequence] = None) -> MultiSigTimelockScript:\n script = cls.from_time(locktime, sequence) + cls.from_pubkeys(\n k, sec_pubkeys)\n return cls(script)\n\n\nclass MultiSigTimelockRedeemScript(RedeemScript, MultiSigTimelockScript):\n pass\n\n\nclass MultiSigTimelockWitnessScript(WitnessScript, MultiSigTimelockScript):\n pass\n"},"repo_name":{"kind":"string","value":"jimmysong/minipy"},"sub_path":{"kind":"string","value":"script.py"},"file_name":{"kind":"string","value":"script.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":13382,"string":"13,382"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":396,"cells":{"seq_id":{"kind":"string","value":"7626498457"},"text":{"kind":"string","value":"\r\nvertices = []\r\n\r\narestas = []\r\n\r\nmatriz = []\r\n\r\nclass Grafo:\r\n def __init__(self, no, noAux, prioridade):\r\n\r\n self.no = no\r\n self.noAux = noAux\r\n self.prioridade = prioridade\r\n\r\ngrafo = open('arquivomatriz.txt', 'r')\r\n\r\nfor i in grafo:\r\n\r\n linha = i.split()\r\n\r\n arestas.append(Grafo(int(linha[0]), int(linha[1]), int(linha[2])))\r\n\r\ngrafo.close()\r\n\r\ndef Inserir(vector):\r\n\r\n inserido = False\r\n\r\n for i in range( len(vertices) ):\r\n\r\n if (vector == vertices[i]):\r\n\r\n inserido = True\r\n break\r\n\r\n return inserido\r\n\r\n\r\nfor i in range( len(arestas) ):\r\n\r\n if(not Inserir(arestas[i].no)):\r\n\r\n vertices.append(arestas[i].no)\r\n\r\n if(not Inserir(arestas[i].noAux)):\r\n\r\n vertices.append(arestas[i].noAux)\r\nvertices = sorted(vertices)\r\n\r\n\r\nfor i in range( len(vertices) ): #Preenche matriz com 0's\r\n\r\n linha = []\r\n\r\n for j in range( len(vertices) ):\r\n\r\n linha.append(0)\r\n\r\n matriz.append(linha)\r\n\r\n\r\nfor i in range( len(arestas) ): # matriz adjacente\r\n\r\n matriz[arestas[i].no][arestas[i].noAux] = arestas[i].prioridade\r\n matriz[arestas[i].noAux][arestas[i].no] = arestas[i].prioridade\r\n\r\n\r\nprint()\r\nprint(\"Matriz Adja: \")\r\n\r\nfor i in range( len(matriz) ):\r\n\r\n print(matriz[i])\r\n\r\nprint()\r\n\r\n\r\nprint(\"O grau de cada vértice é: \")\r\n\r\nfor i in range( len(matriz) ):\r\n\r\n g = 0\r\n\r\n for j in range( len(matriz[i]) ):\r\n\r\n if(matriz[i][j] != 0):\r\n\r\n g += 1\r\n \r\n print('grau do {}: {}'.format(i,g) )"},"repo_name":{"kind":"string","value":"gustavoadl06/Gustavo"},"sub_path":{"kind":"string","value":"6.py"},"file_name":{"kind":"string","value":"6.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1535,"string":"1,535"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"pt"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":397,"cells":{"seq_id":{"kind":"string","value":"36670049284"},"text":{"kind":"string","value":"import matplotlib.pyplot as plt\n# from mpl_toolkits.axes_grid1 import ImageGrid\n# import numpy as np\nfrom os import listdir\nfrom os import chdir\nfrom os import path\nfrom PIL import Image\n# import matplotlib.gridspec as gridspec\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"generate plot for 
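
Editor's aside (hard-coded edges instead of arquivomatriz.txt): the weighted adjacency matrix and per-vertex degree count computed by the graph script above, wrapped in a compact function.

def adjacency_and_degrees(edges, n):
    # edges: (u, v, weight) triples of an undirected weighted graph on vertices 0..n-1
    matrix = [[0] * n for _ in range(n)]
    for u, v, w in edges:
        matrix[u][v] = w
        matrix[v][u] = w
    degrees = [sum(1 for w in row if w != 0) for row in matrix]
    return matrix, degrees

edges = [(0, 1, 3), (1, 2, 5), (0, 2, 2)]
matrix, degrees = adjacency_and_degrees(edges, 3)
for row in matrix:
    print(row)
print("degrees:", degrees)   # -> [2, 2, 2]
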
report\")\nparser.add_argument(\"--input_dir\", required=True, help=\"Input ROS bag.\")\nparser.add_argument(\"--rows\", required=True, help=\"numer of rows in figure\")\nparser.add_argument(\"--cols\", required=True, help=\"number of columns in figure\")\n\nargs = parser.parse_args()\n\n# chdir('/Volumes/macOS Big Sur/Users/pmvanderburg/matplotlib_test/')\nchdir(args.input_dir)\nfiles = listdir(args.input_dir)\nfiles.sort()\n\n\nfor i, f in enumerate(files):\n if f!='.DS_Store':\n print(i,f)\n else:\n del files[i]\n\nimages = [Image.open(f) for f in files]\nprint(len(images))\nmax_rows = 7\nmax_cols = 3\n# max_rows = 3\n# max_cols = 2\n\nmethods=['Input image',\n '640x480 N+FT',\n '832x256 K+FT',\n '640x480 N',\n '832x256 N',\n '640x480 K',\n '832x256 K']\n\nfig, axes = plt.subplots(nrows=7, ncols=3, figsize=(9,10),sharex=True, sharey=True)\nfor idx, image in enumerate(images):\n # print(files[idx])\n print(idx)\n row = idx % max_rows\n col = idx // max_rows\n print(row,' row')\n print(col,' col')\n # if col>0:\n # axes[row, col].axis(\"off\")\n axes[row,col].spines['bottom'].set_color('#ffffff')\n axes[row,col].spines['top'].set_color('#ffffff')\n axes[row,col].spines['right'].set_color('#ffffff')\n axes[row,col].spines['left'].set_color('#ffffff')\n\n if image.size==(1280, 720):\n image = image.resize((640,480))\n\n axes[row, col].imshow(image, cmap=\"gray\", aspect=\"auto\")\n\n axes[row, 0].set_ylabel(methods[row])\n\nplt.subplots_adjust(wspace=.05, hspace=.05)\nplt.xticks([])\nplt.yticks([])\n# fig.savefig(path.join)\nplt.show()\n"},"repo_name":{"kind":"string","value":"ThijsvdBurg/Husky_scripts"},"sub_path":{"kind":"string","value":"data_visualization/plot scripts/plot_results.py"},"file_name":{"kind":"string","value":"plot_results.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1911,"string":"1,911"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":398,"cells":{"seq_id":{"kind":"string","value":"10858272527"},"text":{"kind":"string","value":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom pandas import DataFrame, read_csv\nimport matplotlib.pyplot as plt\nimport pandas as pd \nimport numpy as np\n\n\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn import metrics\n\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\n\nimport sys\nfrom time import time\n\nimport numpy as np\n\n\n# In[2]:\n\n\ndf = pd.read_csv('lyrics.csv')\ndf.head(10)\n\n\n# In[3]:\n\n\ndf['lyrics'].replace('', np.nan, inplace=True)\ndf.dropna(subset=['lyrics'], inplace=True)\nind_drop = df[df['genre'].apply(lambda x: x.startswith('Other'))].index\ndf = df.drop(ind_drop)\n\n\n# In[4]:\n\n\nind_drop = df[df['genre'].apply(lambda x: x.startswith('Not Available'))].index\ndf = df.drop(ind_drop)\n\n\n# In[5]:\n\n\nind_drop = df[df['lyrics'].apply(lambda x: x.startswith('INSTRUMENTAL'))].index\ndf = df.drop(ind_drop)\ndf.drop(columns=['index'])\n\nind_drop = df[df['lyrics'].apply(lambda x: x.startswith('instrumental'))].index\ndf = 
df.drop(ind_drop)\ndf.drop(columns=['index'])\n\n\n# In[6]:\n\n\ngenre=df['genre'].values\nlyrics=df['lyrics'].values\ntrue_k = len(np.unique(genre))\nprint(np.unique(genre), \"The total number of genres are\", true_k)\n\n\n#shaping:\nlyrics = np.array(lyrics)[:,None]\nprint(lyrics.shape)\ngenre = np.array(genre)[:,None]\nprint(genre.shape)\n\n\n# In[7]:\n\n\ndata = np.append(lyrics,genre,axis=1)\ndata.shape\nprint(data)\n\n\n# In[8]:\n\n\nnp.random.shuffle(data)\n\ndata_test = data[10001:20001,]\ndata = data[:10000,]\n\n\n# In[9]:\n\n\ndata_lyrics=data[:,0]\ndata_genre=data[:,1]\n\ndata_lyrics_test = data_test[:,0]\ndata_genre_test = data_test[:,1]\n\n# print(data_lyrics)\n# print(data_genre.shape)\n\n\n# In[10]:\n\n\nvectorizer = TfidfVectorizer( \n max_df=0.75, # max doc freq (as a fraction) of any word to include in the vocabulary\n min_df=0.3, # min doc freq (as doc counts) of any word to include in the vocabulary\n max_features=10000, # max number of words in the vocabulary\n stop_words='english', # remove English stopwords\n use_idf=True ) \n\n\n# In[11]:\n\n\nlabels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6,\n 'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10}\n\nprint(\"Extracting features from the training dataset using a sparse vectorizer\")\nt0 = time()\nvectorizer.fit(data_lyrics)\n\nX = vectorizer.transform(data_lyrics)\nY = [labels[i] for i in data_genre]\n\nX_test = vectorizer.transform(data_lyrics_test)\nY_test = [labels[i] for i in data_genre_test]\n\nn_features = X.shape[1]\nprint(\"done in %fs\" % (time() - t0))\nprint(\"n_samples: %d, n_features: %d\" % X.shape)\n\n\n# In[12]:\n\n\ndoc_ind = 1 # Index of an example document\nxi = X[doc_ind,:].todense()\nterm_ind = xi.argsort()[:, ::-1]\nxi_sort = xi[0,term_ind]\nterms = vectorizer.get_feature_names()\n\nfor i in range(n_features):\n term = terms[term_ind[0,i]]\n tfidf = xi[0,term_ind[0,i]]\n print('{0:20s} {1:f} '.format(term, tfidf))\n\n\n# In[13]:\n\n\nkm = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\n verbose=True)\n\n\n# In[14]:\n\n\nprint(\"Clustering sparse data with %s\" % km)\nt0 = time()\nkm.fit(X)\nprint(\"done in %0.3fs\" % (time() - t0))\nprint()\n\n\n# In[15]:\n\n\norder_centroids = km.cluster_centers_.argsort()[:, ::-1]\nfor i in range(true_k):\n print(\"Cluster %d:\" % i, end='')\n for ind in order_centroids[i, :10]:\n print(' %s' % terms[ind], end='')\n print()\n\n\n# In[16]:\n\n\nlabels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6,\n 'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10}\nprint(labels.values)\n# genre_names\n# data_genre\ngenre_labels=[]\n#print(genre_labels.shape)\nfor j,i in enumerate(data_genre):\n x=labels[i]\n #print(x)\n np.append(genre_labels,x)\n genre_labels.append(x)\n#print(genre_labels)\n\n\n# In[17]:\n\n\nprint((Y_test == km.predict(X_test)).sum() / len(Y_test))\n\n\n# In[18]:\n\n\nlabelkm = km.labels_\nprint(labelkm.shape)\nprint(type(labelkm))\n\n\n# In[19]:\n\n\n#print(data_genre)\nlabelkm = km.labels_\nfrom sklearn.metrics import confusion_matrix\nC = confusion_matrix(genre_labels,labelkm)\n\nCsum = np.sum(C,axis=0)\nCnorm = C / Csum[None,:]\nprint(Cnorm)\nprint(np.array_str(C, precision=3, suppress_small=True))\nplt.imshow(C, 
interpolation='none')\nplt.colorbar()\n\n"},"repo_name":{"kind":"string","value":"TejaishwaryaGagadam/music_genre_predictor"},"sub_path":{"kind":"string","value":"K_Means_Clustering.py"},"file_name":{"kind":"string","value":"K_Means_Clustering.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":4472,"string":"4,472"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}},{"rowIdx":399,"cells":{"seq_id":{"kind":"string","value":"10931063926"},"text":{"kind":"string","value":"import unittest\n\nimport requests_mock\n\nfrom alertaclient.api import Client\n\n\nclass PermissionTestCase(unittest.TestCase):\n\n def setUp(self):\n self.client = Client()\n\n self.perm = \"\"\"\n {\n \"id\": \"584f38f4-b44e-4d87-9b61-c106d21bcc7a\",\n \"permission\": {\n \"href\": \"http://localhost:8080/perm/584f38f4-b44e-4d87-9b61-c106d21bcc7a\",\n \"id\": \"584f38f4-b44e-4d87-9b61-c106d21bcc7a\",\n \"match\": \"websys\",\n \"scopes\": [\n \"admin:users\",\n \"admin:keys\",\n \"write\"\n ]\n },\n \"status\": \"ok\"\n }\n \"\"\"\n\n @requests_mock.mock()\n def test_permission(self, m):\n m.post('http://localhost:8080/perm', text=self.perm)\n perm = self.client.create_perm(role='websys', scopes=['admin:users', 'admin:keys', 'write'])\n self.assertEqual(perm.match, 'websys')\n self.assertEqual(sorted(perm.scopes), sorted(['admin:users', 'admin:keys', 'write']))\n"},"repo_name":{"kind":"string","value":"alerta/python-alerta-client"},"sub_path":{"kind":"string","value":"tests/unit/test_permissions.py"},"file_name":{"kind":"string","value":"test_permissions.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1065,"string":"1,065"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":27,"string":"27"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"6"}}}],"truncated":false,"partial":true},"paginationData":{"pageIndex":3,"numItemsPerPage":100,"numTotalItems":1115872,"offset":300,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NTg5MzI0OCwic3ViIjoiL2RhdGFzZXRzL2JjYi1pbnN0cnVjdC9iY2JfZGF0YSIsImV4cCI6MTc1NTg5Njg0OCwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.0w_TkMthOFagqzWOXs0jfpSOEDDqBm5ljh4IEunc_Yb7jvd3s6sUBNUJjbmayqWxxbpCAybuPArVpabVIZf8DA","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns (each record below lists its values in this order):
  - seq_id: string, lengths 4 to 11
  - text: string, lengths 113 to 2.92M
  - repo_name: string, lengths 4 to 125
  - sub_path: string, lengths 3 to 214
  - file_name: string, lengths 3 to 160
  - file_ext: string, 18 classes
  - file_size_in_byte: int64, 113 to 2.92M
  - program_lang: string, 1 class
  - lang: string, 93 classes
  - doc_type: string, 1 class
  - stars: int64, 0 to 179k
  - dataset: string, 3 classes
  - pt: string, 78 classes
seq_id: 21478478680
import logging, datetime, sys from modules import * args = parser.parse_args() start_time = datetime.datetime.now() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # create a file handler for INFO handler = logging.FileHandler(CONFIG['log_path'] + 'info_the_release_note.log') handler.setLevel(logging.INFO) handler.setFormatter(formatter) # create a file handler for DEBUG debug_handler = logging.FileHandler(CONFIG['log_path'] + 'debug_the_release_note.log') debug_handler.setLevel(logging.DEBUG) debug_handler.setFormatter(formatter) # add the handlers to the logger logger.addHandler(handler) if args.debug: print("Now running in debug mode.") logger.setLevel(logging.DEBUG) logger.addHandler(debug_handler) dzr = Deezer() weekday = datetime.datetime.today().weekday() # Retrieve users, either from args of a contact list if args.user: args.do_not_send = True if not args.email else False users = [{ 'deezer_user_id': int(user), 'email': args.email } for user in args.user] else: try: users = getContacts(args.contact_list_id) if args.contact_list_id else getContacts(CONFIG['contact_list_id']) except Exception as e: logger.info("An error occured while trying to retrieve the contact list.") logger.debug(e) sys.exit(2) logger.info(str(len(users)) + ' users found.') logger.debug(users) for user in users: print("Checking new releases for user id " + str(user['deezer_user_id']) + "...") logger.info("Checking new releases for user id " + str(user['deezer_user_id']) + "...") if args.released_since: released_since = args.released_since else: try: # For weekly users, send new releases on friday only if weekday != 4 and user['frequency'] == 'weekly': logger.debug("Skipping this user as he's a weekly user and will only receive new releases on Friday.") continue else: released_since = { 'daily': 1, 'weekly': 7 }.get(user['frequency'], 1) except KeyError as e: logger.debug("Frequency setting not found. Fallback to default value.") released_since = 1 except Exception as e: logger.debug("An error occured while trying to retrieve the frequency setting:") logger.debug(e) continue try: new_releases = dzr.getNewReleases(user['deezer_user_id'], released_since) except IOError as e: logger.debug("Stopwords and banned artists could not be retrieved.") logger.debug(e) sys.exit(2) except Exception as e: logger.debug(e) sys.exit(2) nb_releases = len(new_releases) logger.info("User id " + str(user['deezer_user_id']) + " has " + str(nb_releases) + " albums released in the past " + str(released_since) + " days.") logger.debug(new_releases) if nb_releases < 1: continue # Store new releases into database try: db = Database() db.storeNewReleases(new_releases, user['deezer_user_id']) del(db) except Exception as e: logger.info("An error occured while trying to store the new releases in the database.") logger.debug(e) # Send new releases by email subject = "♩ Have you listened to " + new_releases[0]['artist']['name'] + "'s new album ?" contenthtml = get_template(new_releases, user['deezer_user_id']) if not args.do_not_send: try: send = sendMail(CONFIG['from_mail'], CONFIG['from_name'], user['email'], subject, contenthtml) logger.info("Sending email - Status: " + str(send.status_code)) logger.debug(send.headers) except Exception as e: logger.info("An error occured while trying to send the mail.") logger.debug(e) sys.exit(2) print('Done') logger.info("Done in %s seconds " % (datetime.datetime.now() - start_time).total_seconds())
repo_name: greird/the-release-note | sub_path: the-release-note.py | file_name: the-release-note.py | file_ext: py | file_size_in_byte: 3,711 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
seq_id: 21256935702

# 500 Exercises Solved with Python
# Thiago Barros
# Exercises solved following the book "500 Algoritmos Resolvidos" (ANITA LOPES E GUTO GARCIA)
# Algorithm number 122
# Chapter 3

"""
Read three numbers and check whether they can be the sides of a triangle
"""

lde1 = float(input("Enter the first side"))
lde2 = float(input("Enter the second side"))
lde3 = float(input("Enter the third side"))

if (lde1 < lde2 + lde3) and (lde2 < lde3 + lde1) and (lde3 < lde1 + lde2):
    print("Yes, the values can be the sides of a triangle")
else:
    print("No, the values cannot be the sides of a triangle")

repo_name: Tbarros1996/500algoritmos | sub_path: Capitulo_3/algoritmo_122.py | file_name: algoritmo_122.py | file_ext: py | file_size_in_byte: 670 | program_lang: python | lang: pt | doc_type: code | stars: 0 | dataset: github-code | pt: 6
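The check above is the standard triangle inequality (each side strictly shorter than the sum of the other two). A minimal sketch of the same test factored into a reusable function; the function name and sample values are illustrative, not from the repository.

def is_triangle(a: float, b: float, c: float) -> bool:
    # A triangle exists only if every side is strictly shorter than the sum of the other two.
    return a < b + c and b < c + a and c < a + b

print(is_triangle(3, 4, 5))   # True
print(is_triangle(1, 2, 10))  # False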
seq_id: 39404813463

from django.urls import path
from .views import (
    ColaboradorList,
    ColaboradorUpdate,
    ColaboradorDelete,
    ColaboradorCreate,
    ColaboradorReport,
    HtmlPdf,
)

urlpatterns = [
    path('listar', ColaboradorList.as_view(), name='list_colaborador'),
    path('criar', ColaboradorCreate.as_view(), name='create_colaborador'),
    path('editar/<int:pk>', ColaboradorUpdate.as_view(), name='update_colaborador'),
    path('excluir/<int:pk>', ColaboradorDelete.as_view(), name='delete_colaborador'),
    path('relatorio', ColaboradorReport, name='report_colaborador'),
    path('relatorio_html', HtmlPdf.as_view(), name='report_colaborador_html'),
]

repo_name: fabiogpassos/GRH | sub_path: apps/colaboradores/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 664 | program_lang: python | lang: es | doc_type: code | stars: 0 | dataset: github-code | pt: 6
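For reference, the names given to these routes can be resolved elsewhere in the project with Django's reverse(); a small sketch, assuming this URLconf is wired into the project's root urls (the pk value is a placeholder).

from django.urls import reverse

# Builds the URL registered as 'editar/<int:pk>' above, e.g. '.../editar/1'
# depending on the prefix under which this URLconf is included.
url = reverse('update_colaborador', kwargs={'pk': 1})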
seq_id: 10033128545

# Created by LEWEEN.MASSIN, on 23/03/2023, in Python 3.7

from csv import reader as read

file = '49-prenoms-2013-22.csv'
name = 'Aaron'
gender = 'F'
year = 2019

def import_table(file):
    l = []
    with open(file, 'r') as csv_open:
        csv_read = read(csv_open, delimiter=';')
        for row in csv_read:
            l.append(row)
    return l

def easier_table(file):
    l = import_table(file)
    l.pop(0)
    for row in l:
        for i in range(2):
            row.pop(0)
    return l

def nbr_enfants(file, name):
    l = easier_table(file)
    nbr = 0
    for row in l:
        if row[1] == name:
            nbr += int(row[2])
    return nbr

def nbr_gender(file, gender, year):
    l = easier_table(file)
    occur = 0
    for row in l:
        if row[0] == gender and int(row[3]) == year:
            occur += 1
    return occur

print(f"{name}: {nbr_enfants(file, name)}")
print(f"{gender} en {year}: {nbr_gender(file, gender, year)}")

repo_name: Remingusu/NSI_premiere | sub_path: 7 - Traitement de données en tables/Projet Traitement de données en tables/main.py | file_name: main.py | file_ext: py | file_size_in_byte: 957 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
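The helpers above re-read and re-trim the CSV for every query. A hedged single-pass sketch of the same name count; the column positions (name in raw column 3, count in raw column 4) are inferred from the trimming and indexing done above, and the file name is the same sample file.

from csv import reader as read

def count_for_name(path, wanted_name):
    # Read the file once, skip the header, and sum the per-year counts for one name.
    total = 0
    with open(path, 'r') as csv_open:
        rows = read(csv_open, delimiter=';')
        next(rows)  # header row
        for row in rows:
            if row[3] == wanted_name:
                total += int(row[4])
    return total

print(f"Aaron: {count_for_name('49-prenoms-2013-22.csv', 'Aaron')}")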
seq_id: 43193667766
#!/usr/bin/env python import rospy import smach from PrintColours import * from std_msgs.msg import String,UInt8 from mavros_msgs.msg import ExtendedState # import custom message: from muav_state_machine.msg import UAVState # global variables to catch the data from the agent state machine node airframe_type = "" mission_state = "" wp_reached = 0 extended_state = ExtendedState() uav_state = 0 landed_state = 0 flight_status_dji = 0 #callback functions def airframe_type_cb(msg): global airframe_type airframe_type = msg.data def mission_state_cb(msg): global mission_state mission_state = msg.data def wp_reached_cb(msg): global wp_reached wp_reached = msg.data def estate_cb(msg): global extended_state extended_state = msg def flight_status_dji_cb(msg): global flight_status_dji flight_status_dji = msg.data class UavState(smach.State): def __init__(self,uav_id):#modify, common_data smach.State.__init__( self, outcomes=['mission_finished', 'shutdown']) self.uav_id = uav_id def execute(self, ud): UAVState_pub = rospy.Publisher("/muav_sm/uav_{}/uavstate".format(self.uav_id), UAVState, queue_size=10) rospy.loginfo('[UavState] - UAV{} state'.format(self.uav_id)) rate = rospy.Rate(20) # 20hz UAVState_msg = UAVState() #subscribers initialization autopilot_sub = rospy.Subscriber("/uav_{}_sm/com/airframe_type".format(self.uav_id), String, airframe_type_cb) mission_state_sub = rospy.Subscriber("/uav_{}_sm/com/mission_state".format(self.uav_id), String, mission_state_cb) wp_reached_sub = rospy.Subscriber("/uav_{}_sm/com/wp_reached".format(self.uav_id), UInt8, wp_reached_cb) if airframe_type=="px4/vtol": extended_state_sub = rospy.Subscriber("/uav_{}_sm/com/extended_state".format(self.uav_id), ExtendedState, estate_cb) # subscribers for dji data if airframe_type=="dji/M210": flight_status_dji_sub = rospy.Subscriber("/uav_{}_sm/com/flight_status_dji".format(self.uav_id), UInt8, flight_status_dji_cb) # UAVState_msg initialization UAVState_msg.airframe_type = "px4/vtol" UAVState_msg.mission_state = "idle" UAVState_msg.wp_reached = 0 UAVState_msg.uav_state = 0 UAVState_msg.landed_state = 0 # transition to X state while not rospy.is_shutdown(): # TBD: error detection if not namespaces with the name of the uav_id #rospy.loginfo('[UavState] - UAV{} state: airframetype: {}, mission_state: {}, wp_reached: {}, extended_state: {}'.format(self.uav_id,autopilot,mission_state,wp_reached,extended_state)) #fill the UAVState_msg custom message UAVState_msg.airframe_type = airframe_type #parameter UAVState_msg.mission_state = mission_state#published by the agent state machine if airframe_type=="px4/vtol" and mission_state=="mission_running": UAVState_msg.wp_reached = wp_reached #published by the agent state machine UAVState_msg.uav_state = extended_state.vtol_state #published by the agent state machine UAVState_msg.landed_state = extended_state.landed_state else: UAVState_msg.wp_reached = 0 #TBD: create a function to do it in the DJI UAVState_msg.uav_state = 0 UAVState_msg.landed_state = 0 # modified with dji data if airframe_type=="dji/M210" and mission_state=="mission_running": UAVState_msg.landed_state = flight_status_dji #published by the agent state machine #publish the UAVState_msg custom message UAVState_pub.publish(UAVState_msg) #finish this state: # if UAVState_msg.mission_state == "idle": # return 'mission_finished' rate.sleep() return 'shutdown'
repo_name: miggilcas/muav_state_machine | sub_path: scripts/GStates/uav_state.py | file_name: uav_state.py | file_ext: py | file_size_in_byte: 4,040 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 43431937253

#!/usr/bin/env python3
"""
Evaluator
"""
import sys
import tensorflow as tf
from utils import decode_img, image_patches, write_tensor_as_image
from model import image_diff, UPSCALER_FACTOR


def main():
    """ Main function """
    try:
        image_path = sys.argv[1]
    except:
        print("Usage: {} <image path>".format(sys.argv[0]))
        exit(-1)

    try:
        model_path = sys.argv[2]
    except:
        model_path = './saved_model'

    PATCH_SIZE = 240 // UPSCALER_FACTOR
    N_CHANNELS = 3

    model = tf.keras.models.load_model(model_path)

    image = decode_img(image_path, N_CHANNELS)
    patches = image_patches(image, PATCH_SIZE, PATCH_SIZE, N_CHANNELS)
    model_out = model(patches)

    for idx, (patch_in, patch_out) in enumerate(zip(patches, model_out)):
        write_tensor_as_image("{}a.png".format(idx), patch_in)
        write_tensor_as_image("{}b.png".format(idx), patch_out)

    #image = tf.expand_dims(image, axis=0)
    #model_out = model(image)
    #model_out = tf.squeeze(model_out, axis=0)
    #write_tensor_as_image("out.png", model_out)


if __name__ == "__main__":
    main()

repo_name: Masterchef365/ENHANCE | sub_path: eval.py | file_name: eval.py | file_ext: py | file_size_in_byte: 1,123 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 17529991766
import os, glob, asyncio class CommandDispatcher: """Register commands and run them""" def __init__(self): self.commands = {} self.commands_admin = [] self.unknown_command = None def get_admin_commands(self, bot, conv_id): """Get list of admin-only commands (set by plugins or in config.json)""" commands_admin = bot.get_config_suboption(conv_id, 'commands_admin') or [] return list(set(commands_admin + self.commands_admin)) @asyncio.coroutine def run(self, bot, event, *args, **kwds): """Run command""" try: func = self.commands[args[0]] except KeyError: if self.unknown_command: func = self.unknown_command else: raise args = list(args[1:]) try: yield from func(bot, event, *args, **kwds) except Exception as e: print(e) def register(self, *args, admin=False): """Decorator for registering command""" def wrapper(func): # Automatically wrap command function in coroutine func = asyncio.coroutine(func) self.commands[func.__name__] = func if admin: self.commands_admin.append(func.__name__) return func # If there is one (and only one) positional argument and this argument is callable, # assume it is the decorator (without any optional keyword arguments) if len(args) == 1 and callable(args[0]): return wrapper(args[0]) else: return wrapper def register_unknown(self, func): """Decorator for registering unknown command""" # Automatically wrap command function in coroutine func = asyncio.coroutine(func) self.unknown_command = func return func # Create CommandDispatcher singleton command = CommandDispatcher() # Build list of commands _plugins = glob.glob(os.path.join(os.path.dirname(__file__), "*.py")) __all__ = [os.path.splitext(os.path.basename(f))[0] for f in _plugins if os.path.isfile(f) and not os.path.basename(f).startswith("_")] # Load all commands from hangupsbot.commands import *
repo_name: xmikos/hangupsbot | sub_path: hangupsbot/commands/__init__.py | file_name: __init__.py | file_ext: py | file_size_in_byte: 2,229 | program_lang: python | lang: en | doc_type: code | stars: 105 | dataset: github-code | pt: 6
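A minimal usage sketch for the dispatcher above, assuming the hangupsbot package is importable and a Python version old enough to still ship asyncio.coroutine; the command body and the stubbed bot/event arguments are placeholders, not part of the project.

import asyncio
from hangupsbot.commands import command

@command.register
def echo(bot, event, *args):
    # Placeholder body; a real command would send a message through the bot object.
    print('echo:', ' '.join(args))

@command.register_unknown
def unknown(bot, event, *args):
    print('unknown command')

loop = asyncio.get_event_loop()
# run() looks up 'echo' in the registry and awaits it with the remaining arguments.
loop.run_until_complete(command.run(None, None, 'echo', 'hello', 'world'))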
seq_id: 7165790234
import argparse import logging import sys from itertools import chain from logging import getLogger from typing import Iterable, Optional, Union from competitive_verifier import oj from competitive_verifier.arg import add_verify_files_json_argument from competitive_verifier.error import VerifierError from competitive_verifier.log import configure_logging from competitive_verifier.models import ( ProblemVerification, VerificationFile, VerificationInput, ) from competitive_verifier.resource import ulimit_stack logger = getLogger(__name__) UrlOrVerificationFile = Union[str, VerificationFile] def parse_urls( input: Union[UrlOrVerificationFile, Iterable[UrlOrVerificationFile]] ) -> Iterable[str]: def parse_single(url_or_file: UrlOrVerificationFile) -> Iterable[str]: if isinstance(url_or_file, str): return (url_or_file,) else: return enumerate_urls(url_or_file) if isinstance(input, (str, VerificationFile)): return parse_single(input) return chain.from_iterable(parse_single(uf) for uf in input) def enumerate_urls(file: VerificationFile) -> Iterable[str]: for v in file.verification: if isinstance(v, ProblemVerification): yield v.problem def run_impl( input: Union[UrlOrVerificationFile, Iterable[UrlOrVerificationFile]], check: bool = False, group_log: bool = False, ) -> bool: result = True try: ulimit_stack() except Exception: logger.warning("failed to increase the stack size[ulimit]") for url in parse_urls(input): if not oj.download(url, group_log=group_log): result = False if check and not result: raise VerifierError("Failed to download") return result def run(args: argparse.Namespace) -> bool: logger.debug("arguments=%s", vars(args)) logger.info("verify_files_json=%s", str(args.verify_files_json)) logger.info("urls=%s", args.urls) files: list[VerificationFile] = [] if args.verify_files_json: verification = VerificationInput.parse_file_relative(args.verify_files_json) files = list(verification.files.values()) return run_impl(files + args.urls, group_log=True) def argument(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: add_verify_files_json_argument(parser, required=False) parser.add_argument( "urls", nargs="*", help="A list of problem URL", ) return parser def main(args: Optional[list[str]] = None) -> None: try: configure_logging(logging.INFO) parsed = argument(argparse.ArgumentParser()).parse_args(args) if not run(parsed): sys.exit(1) except Exception as e: sys.stderr.write(str(e)) sys.exit(2) if __name__ == "__main__": main()
repo_name: competitive-verifier/competitive-verifier | sub_path: src/competitive_verifier/download/main.py | file_name: main.py | file_ext: py | file_size_in_byte: 2,823 | program_lang: python | lang: en | doc_type: code | stars: 8 | dataset: github-code | pt: 6
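A small sketch of driving the downloader above from Python rather than the CLI; run_impl accepts either URLs or VerificationFile objects, and the Library Checker URL below is only an illustration.

from competitive_verifier.download.main import run_impl

# Downloads the test cases via oj; with check=True a failure raises VerifierError.
run_impl("https://judge.yosupo.jp/problem/aplusb", check=True)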
seq_id: 34199641942

#!/usr/bin/env python

import sys
import rospy
from art_collision_env.int_collision_env import IntCollisionEnv
import os


def main():
    rospy.init_node('collision_env_node', anonymous=True)

    try:
        setup = os.environ["ARTABLE_SETUP"]
    except KeyError:
        rospy.logfatal("ARTABLE_SETUP has to be set.")
        return

    ce = IntCollisionEnv(setup, "marker")
    ce.load_from_db()
    ce.start()
    rospy.spin()


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print("Shutting down")

repo_name: robofit/arcor | sub_path: art_collision_env/src/node.py | file_name: node.py | file_ext: py | file_size_in_byte: 551 | program_lang: python | lang: en | doc_type: code | stars: 9 | dataset: github-code | pt: 6
seq_id: 10918517772

"""
Plugin entry point for helga
"""
import math

from craigslist_scraper.scraper import scrape_url
from helga.plugins import match

TEMPLATE = 'Listing title: {}, price: {}'


@match(r'[A-Za-z]+\.craigslist\.org/.../\S+')
def craigslist_meta(client, channel, nick, message, match):
    """ Return meta information about a listing """
    data = scrape_url('http://' + match[0])
    result = TEMPLATE.format(data.title, data.price)
    for key, value in data.attrs.items():
        result += ', {}: {}'.format(key, value)
    return result

repo_name: narfman0/helga-craigslist-metadata | sub_path: helga_craigslist_meta/plugin.py | file_name: plugin.py | file_ext: py | file_size_in_byte: 538 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 3682557651
class MapFlags(object): __slots__ = ('_value') name = 'tmwa::map::MapFlags' enabled = True def __init__(self, value): self._value = value['flags'] def to_string(self): i = int(self._value) s = [] for n, v in MapFlags.junk: v = 1 << v if i & v: i -= v s.append(n) if i or not s: s.append('%#08x' % i) return 'MapFlags(%s)' % (' | '.join(s)) junk = [ #('ALIAS', 21), #('NOMEMO', 0), ('NOTELEPORT', 1), ('NORETURN', 22), ('MONSTER_NOTELEPORT', 23), ('NOSAVE', 2), #('NOBRANCH', 3), ('NOPENALTY', 4), ('PVP', 6), ('PVP_NOPARTY', 7), #('PVP_NOGUILD', 8), #('PVP_NIGHTMAREDROP', 24), ('PVP_NOCALCRANK', 25), #('GVG', 9), #('GVG_NOPARTY', 10), #('NOZENYPENALTY', 5), #('NOTRADE', 11), #('NOSKILL', 12), ('NOWARP', 13), ('NOWARPTO', 26), ('NOPVP', 14), ('MASK', 15), ('SNOW', 16), ('FOG', 17), ('SAKURA', 18), ('LEAVES', 19), ('RAIN', 20), ('NO_PLAYER_DROPS', 27), ('TOWN', 28), ('OUTSIDE', 29), ('RESAVE', 30), ] tests = [ ('reinterpret_cast<const tmwa::map::MapFlags&>(static_cast<const unsigned int&>(0x80000000))', 'MapFlags(0x80000000)'), ('reinterpret_cast<const tmwa::map::MapFlags&>(static_cast<const unsigned int&>(0xf0000000))', 'MapFlags(TOWN | OUTSIDE | RESAVE | 0x80000000)'), ] + [ ('tmwa::map::MapFlags(); value.set(tmwa::map::MapFlag::%s, true)' % n, 'MapFlags(%s)' % n) for (n, _) in junk ] + [ ('reinterpret_cast<const tmwa::map::MapFlags&>(static_cast<const unsigned int&>(1 << %d))' % i, 'MapFlags(%s)' % n) for (n, i) in junk ]
repo_name: themanaworld/tmwa | sub_path: src/map/mapflag.py | file_name: mapflag.py | file_ext: py | file_size_in_byte: 2,048 | program_lang: python | lang: en | doc_type: code | stars: 48 | dataset: github-code | pt: 6
seq_id: 11353299783

"""
Problem Statement
Given a binary tree, populate an array to represent its level-by-level traversal.
You should populate the values of all nodes of each level from left to right in separate sub-arrays.
"""

from collections import deque


class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None


def traverse(root):
    result = []
    deq = deque()
    if root:
        deq.append(root)

    while deq:
        length = len(deq)
        node_list = []
        for _ in range(length):
            current_node = deq.popleft()
            node_list.append(current_node.val)
            if current_node.left:
                deq.append(current_node.left)
            if current_node.right:
                deq.append(current_node.right)
        if node_list:
            result.append(node_list)

    return result


def main():
    root = TreeNode(12)
    root.left = TreeNode(7)
    root.right = TreeNode(1)
    root.left.left = TreeNode(9)
    root.right.left = TreeNode(10)
    root.right.right = TreeNode(5)
    print("Level order traversal: " + str(traverse(root)))


main()

repo_name: jihoonyou/problem-solving | sub_path: Educative/bfs/example1.py | file_name: example1.py | file_ext: py | file_size_in_byte: 1,137 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 6533110297

#!/usr/bin/python3
#-*- coding: utf-8 -*-

from moduls.data import *
from PyQt4 import QtGui, QtCore, uic

# StudentData = StudentData()

class MainWindow(QtGui.QMainWindow):
    """docstring for MainWindow"""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = uic.loadUi("ui\MainWindow.ui", self)

        self.FormData = FormData()
        self.FormLayout.addWidget( self.FormData )

        self.Faculty = FacultyData()
        self.FacLayout.addWidget( self.Faculty )

        self.Speciality = SpecialityData()
        self.SpecLayout.addWidget( self.Speciality)

        self.Students = StudentData( self.Faculty.data[0], self.Speciality.data[0], self.FormData.data[0] )
        self.StudLayout.addWidget( self.Students )

        self.Contract = ContractData( self.Students.data )
        self.ContrLayout.addWidget( self.Contract )

        self.connect( self.tabWidget, QtCore.SIGNAL('currentChanged(int)'), self.Contract.update )
        self.connect( self.tabWidget, QtCore.SIGNAL('currentChanged(int)'), self.Students.update )

repo_name: TchippunkT/Kursuch | sub_path: moduls/windows/MainWindow.py | file_name: MainWindow.py | file_ext: py | file_size_in_byte: 1,129 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 24993496301
from osv import fields from osv import osv class dm_address_segmentation(osv.osv): # {{{ _inherit = "dm.address.segmentation" _description = "Order Segmentation" def set_address_criteria(self, cr, uid, ids, context={}): sql_query = super(dm_address_segmentation,self).set_address_criteria(cr, uid, ids, context) if isinstance(ids, (int, long)): ids = [ids] criteria=[] browse_id = self.browse(cr, uid, ids)[0] if browse_id.order_text_criteria_ids: for i in browse_id.order_text_criteria_ids: if i.field_id.ttype == 'many2one': relation_obj = self.pool.get(i.field_id.relation) rec_name = relation_obj._rec_name criteria.append("so.%s in (select id from %s where %s %s '%s' )"%( i.field_id.name, relation_obj._table, rec_name, i.operator, "%"+i.value+"%")) else : criteria.append("so.%s %s '%s'"%(i.field_id.name, i.operator, "%"+i.value+"%")) if browse_id.order_numeric_criteria_ids: for i in browse_id.order_numeric_criteria_ids: criteria.append("so.%s %s %f"%(i.field_id.name, i.operator, i.value)) if browse_id.order_boolean_criteria_ids: for i in browse_id.order_boolean_criteria_ids: criteria.append("so.%s %s %s"%(i.field_id.name, i.operator, i.value)) if browse_id.order_date_criteria_ids: for i in browse_id.order_date_criteria_ids: criteria.append("so.%s %s '%s'"%(i.field_id.name, i.operator, i.value)) if criteria: so_sql_query = ("""select distinct so.partner_invoice_id \nfrom sale_order so\nwhere %s\n""" % (' and '.join(criteria))).replace('isnot','is not') sql_query += '''and pa.id in (%s)'''%so_sql_query return sql_query _columns = { 'order_text_criteria_ids' : fields.one2many('dm.extract.sale.text_criteria', 'segmentation_id', 'Customers Order Textual Criteria'), 'order_numeric_criteria_ids' : fields.one2many('dm.extract.sale.numeric_criteria', 'segmentation_id', 'Customers Order Numeric Criteria'), 'order_boolean_criteria_ids' : fields.one2many('dm.extract.sale.boolean_criteria', 'segmentation_id', 'Customers Order Boolean Criteria'), 'order_date_criteria_ids' : fields.one2many('dm.extract.sale.date_criteria', 'segmentation_id', 'Customers Order Date Criteria'), } dm_address_segmentation() # }}} TEXT_OPERATORS = [ # {{{ ('like','like'), ('ilike','ilike'), ] # }}} NUMERIC_OPERATORS = [ # {{{ ('=','equals'), ('<','smaller then'), ('>','bigger then'), ] # }}} BOOL_OPERATORS = [ # {{{ ('is','is'), ('isnot','is not'), ] # }}} DATE_OPERATORS = [ # {{{ ('=','equals'), ('<','before'), ('>','after'), ] # }}} class dm_extract_sale_text_criteria(osv.osv): # {{{ _name = "dm.extract.sale.text_criteria" _description = "Customer Order Segmentation Textual Criteria" _rec_name = "segmentation_id" _columns = { 'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'), 'field_id' : fields.many2one('ir.model.fields','Customers Field', domain=[('model_id.model','=','sale.order'), ('ttype','in',['char','many2one'])], context={'model':'sale.order'}, required = True), 'operator' : fields.selection(TEXT_OPERATORS, 'Operator', size=32, required = True), 'value' : fields.char('Value', size=128, required = True), } dm_extract_sale_text_criteria() # }}} class dm_extract_sale_numeric_criteria(osv.osv): # {{{ _name = "dm.extract.sale.numeric_criteria" _description = "Customer Order Segmentation Numeric Criteria" _rec_name = "segmentation_id" _columns = { 'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'), 'field_id' : fields.many2one('ir.model.fields','Customers Field', domain=[('model_id.model','=','sale.order'), ('ttype','in',['integer','float'])], 
context={'model':'sale.order'}, required = True), 'operator' : fields.selection(NUMERIC_OPERATORS, 'Operator', size=32, required = True), 'value' : fields.float('Value', digits=(16,2), required = True), } dm_extract_sale_numeric_criteria() # }}} class dm_extract_sale_boolean_criteria(osv.osv): # {{{ _name = "dm.extract.sale.boolean_criteria" _description = "Customer Order Segmentation Boolean Criteria" _rec_name = "segmentation_id" _columns = { 'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'), 'field_id' : fields.many2one('ir.model.fields','Customers Field', domain=[('model_id.model','=','sale.order'), ('ttype','like','boolean')], context={'model':'sale.order'}, required = True), 'operator' : fields.selection(BOOL_OPERATORS, 'Operator', size=32, required = True), 'value' : fields.selection([('true','True'),('false','False')],'Value', required = True), } dm_extract_sale_boolean_criteria() # }}} class dm_extract_sale_date_criteria(osv.osv): # {{{ _name = "dm.extract.sale.date_criteria" _description = "Customer Order Segmentation Date Criteria" _rec_name = "segmentation_id" _columns = { 'segmentation_id' : fields.many2one('dm.address.segmentation', 'Segmentation'), 'field_id' : fields.many2one('ir.model.fields','Customers Field', domain=[('model_id.model','=','sale.order'), ('ttype','in',['date','datetime'])], context={'model':'sale.order'}, required = True), 'operator' : fields.selection(DATE_OPERATORS, 'Operator', size=32, required = True), 'value' : fields.date('Date', required = True), } dm_extract_sale_date_criteria() # }}} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
repo_name: factorlibre/openerp-extra-6.1 | sub_path: dm_extract_sale/dm_extract_sale.py | file_name: dm_extract_sale.py | file_ext: py | file_size_in_byte: 6,108 | program_lang: python | lang: en | doc_type: code | stars: 9 | dataset: github-code | pt: 6
seq_id: 20172575137
import os import sys from typing import Optional from dotenv import load_dotenv from spinner import Spinner import actions import response_parser import speech import gpt message_history = [] GENERAL_DIRECTIONS_PREFIX = """ CONSTRAINTS: - Cannot run Python code that requires user input. ACTIONS: - "TELL_USER": tell the user something. The schema for the action is: TELL_USER: <TEXT> - "READ_FILE": read the current state of a file. The schema for the action is: READ_FILE: <PATH> - "WRITE_FILE": write a block of text to a file. The schema for the action is: WRITE_FILE: <PATH> ``` <TEXT> ``` - "RUN_PYTHON": run a Python file. The schema for the action is: RUN_PYTHON: <PATH> - "SEARCH_ONLINE": search online and get back a list of URLs relevant to the query. The schema for the action is: SEARCH_ONLINE: <QUERY> - EXTRACT_INFO: extract specific information from a webpage. The schema for the action is: EXTRACT_INFO: <URL>, <a brief instruction to GPT for information to extract> - "SHUTDOWN": shut down the program. The schema for the action is: SHUTDOWN RESOURCES: 1. File contents after reading file. 2. Online search results returning URLs. 3. Output of running a Python file. PERFORMANCE EVALUATION: 1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities. 2. Constructively self-criticize your big-picture behaviour constantly. 3. Reflect on past decisions and strategies to refine your approach. 4. Every action has a cost, so be smart and efficent. Aim to complete tasks in the least number of steps. Write only one action. The action must one of the actions specified above and must be written according to the schema specified above. After the action, write a JSON object (parseable by Python's json.loads()) which must contain the following keys: - "reason": a short sentence explaining the action above - "plan": a short high-level plan in plain English """ FLAG_VERBOSE = "--verbose" FLAG_SPEECH = "--speech" FLAG_CONTINUOUS = "--continuous" def main(): general_directions = GENERAL_DIRECTIONS_PREFIX if FLAG_SPEECH in sys.argv[1:]: general_directions += '- "speak": a short summary of thoughts to say to the user' general_directions += "\n\n" general_directions += "If you want to run an action that is not in the above list of actions, send the SHUTDOWN action instead and explain in 'reason' which action you wanted to run.\n" general_directions += "So, write one action and one metadata JSON object, nothing else." load_dotenv() os.makedirs("workspace", exist_ok=True) os.chdir("workspace") new_plan: Optional[str] = None user_directions = input("What would you like me to do:\n") while True: print("========================") with Spinner("Thinking..."): assistant_response = gpt.chat(user_directions, general_directions, new_plan, message_history) if FLAG_VERBOSE in sys.argv[1:]: print(f"ASSISTANT RESPONSE: {assistant_response}") action, metadata = response_parser.parse(assistant_response) print(f"ACTION: {action.short_string()}") if FLAG_SPEECH in sys.argv[1:] and metadata.speak is not None: speech.say_async(metadata.speak) if isinstance(action, actions.ShutdownAction): print("Shutting down...") break else: print(f"REASON: {metadata.reason}") print(f"PLAN: {metadata.plan}") if FLAG_CONTINUOUS not in sys.argv[1:]: run_action = input("Run the action? 
[Y/n]") if run_action.lower() != "y" and run_action != "": break action_output = action.run() message_content = f"Action {action.key()} returned:\n{action_output}" message_history.append({"role": "system", "content": message_content}) change_plan = input("Change the proposed plan? [N/y]") if change_plan.lower() == "y": new_plan = input("What would you like me to change the plan to? ") else: new_plan = None if __name__ == "__main__": main()
repo_name: rokstrnisa/RoboGPT | sub_path: robogpt/main.py | file_name: main.py | file_ext: py | file_size_in_byte: 4,135 | program_lang: python | lang: en | doc_type: code | stars: 264 | dataset: github-code | pt: 6
seq_id: 25874708021

class MyClass:
    nome: str
    cognome: str

    def __init__(self, nome, cognome):
        self.nome = nome
        self.cognome = cognome


mc = MyClass(nome = "Roberto", cognome = "Gianotto")
print(mc)
print(mc.nome)
print(mc.cognome)

repo_name: pinguinato/corso-python | sub_path: esercizi/type_annotations/myclass.py | file_name: myclass.py | file_ext: py | file_size_in_byte: 239 | program_lang: python | lang: la | doc_type: code | stars: 0 | dataset: github-code | pt: 6
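The class above annotates its fields but still writes __init__ by hand, and print(mc) falls back to the default object repr. A sketch of the same example as a dataclass, which generates __init__ and a readable __repr__ from the annotations.

from dataclasses import dataclass

@dataclass
class MyClass:
    nome: str
    cognome: str

mc = MyClass(nome="Roberto", cognome="Gianotto")
print(mc)  # MyClass(nome='Roberto', cognome='Gianotto')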
seq_id: 11000393367
import typing as T from datetime import datetime, timedelta from pydantic import BaseModel from mirai import ( Mirai, Member, Friend, MessageChain, At ) from .alias import MESSAGE_T # https://mirai-py.originpages.com/tutorial/annotations.html Sender = T.Union[Member, Friend] Type = str def reply(app: Mirai, sender: "Sender", event_type: "Type"): """app_reply = reply(app, sender, event_type) app_reply(message) """ async def wrapper(message: MESSAGE_T, *, at_sender: bool = False): if at_sender: if isinstance(message, list): message.insert(0, At(sender.id)) elif isinstance(message, MessageChain): message.__root__.insert(0, At(sender.id)) else: raise TypeError(f"not supported type for reply: {message.__class__.__name__}") if event_type == "GroupMessage": await app.sendGroupMessage(sender.group, message) elif event_type == "FriendMessage": await app.sendFriendMessage(sender, message) else: raise ValueError("Not supported event type") return wrapper def at_me(app: Mirai, message: MessageChain): at: T.Optional[At] = message.getFirstComponent(At) if at: return at.target == app.qq else: return False class CoolDown(BaseModel): """example: cd = CoolDown(app='app1', td=20) cd.update(123) cd.check(123) """ app: str td: float # timedelta value: T.Dict[int, datetime] = {} def update(self, mid: int) -> None: self.value.update({mid: datetime.now()}) def check(self, mid: int) -> bool: ret = datetime.now() >= self.value.get(mid, datetime.utcfromtimestamp(0)) + timedelta(seconds=self.td) return ret def shuzi2number(shuzi: T.Optional[str]) -> int: s = {'一': 1, '两': 2, '二': 2, '三': 3, '四': 4, '五': 5, '六': 6, '七': 7, '八': 8, '九': 9, '十': 10} if not shuzi: return 1 elif shuzi.isdecimal(): return int(shuzi) elif shuzi in s.keys(): return s[shuzi] else: return 1
repo_name: Lycreal/MiraiBot | sub_path: plugins/_utils/__init__.py | file_name: __init__.py | file_ext: py | file_size_in_byte: 2,146 | program_lang: python | lang: en | doc_type: code | stars: 70 | dataset: github-code | pt: 6
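A small usage sketch for the CoolDown helper above, following its own docstring; the import path is assumed from the repository layout, and the app name and user id are placeholders.

from plugins._utils import CoolDown  # import path assumed from the repo layout

cd = CoolDown(app='demo', td=20)  # 20-second cooldown per id

user_id = 12345
if cd.check(user_id):   # True when there is no previous call or the 20 s have elapsed
    cd.update(user_id)  # record this invocation time
    # ... handle the command here ...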
seq_id: 33608143285

# 1 Convert a string into a list
# "Robin Singh" => ["Robin", "Singh"]
# "I love arrays they are my favorite" => ["I", "love", "arrays", "they", "are", "my", "favorite"]

rob = "Robin Singh"
fav = "I love arrays they are my favorite"


def robin(rob):
    rob = list(rob)
    return rob


robin(rob)
robin(fav)

# 2 Given the list ['Ivan', 'Ivanou'] and 2 strings: Minsk, Belarus
# Print the text: "Hello, Ivan Ivanou! Welcome to Minsk Belarus"

a = ['Ivan', 'Ivanou']
b = 'Minsk'
c = 'Belarus'


def welcome(a, b, c):
    a = " ".join(a)
    print("Hello, {0}! Welcome to {1} {2}".format(a, b, c))


welcome(a, b, c)

# 3 Given the list ["I", "love", "arrays", "they", "are", "my", "favorite"], make it into
# the string => "I love arrays they are my favorite"


def faw(fav):
    fav = "".join(fav)
    return fav


faw(fav)

# 4 Create a list of 10 elements, insert a new value at the 3rd position,
# delete the element of the list at index 6
ex = ['cake', '20', 'ball', 'pill', 'love', 'like', '88', 'eight', ' apple', '8']


def ex_4(ex):
    ex.pop(6)
    ex[2] = 'doll'
    return ex


ex_4(ex)

repo_name: visek8/-QAP12OnlVikanas | sub_path: home_work/hw_4/types.py | file_name: types.py | file_ext: py | file_size_in_byte: 1,345 | program_lang: python | lang: ru | doc_type: code | stars: 0 | dataset: github-code | pt: 6
seq_id: 74280782907
import pandas as pd import numpy as np from xgboost import XGBClassifier from metrics import macro_f1 import settings import pickle import gc import time class BCXGBTrainer: def __init__(self, config, logger): self.config = config self.model_params = config['model_params'] self.training_params = config['training_params'] self.logger = logger def train_and_validate(self, df): self.logger.info('Run training !') self.logger.info(f'config : {self.config}') xgb_oof = np.zeros((df.shape[0],)) xgb_oof_score = [] xgb_importances = pd.DataFrame() model_save_dir = settings.MODEL / self.model_params['model_save_dir'] model_save_dir.mkdir(parents=True, exist_ok=True) tabular_features = self.config['tabular_features'] target = self.training_params['target'] X = df[tabular_features] y = df[target] model = XGBClassifier(**self.training_params['best_params']) for fold in range(self.config['n_folds']): self.logger.info(f'Fold {fold} training ...') start_time = time.time() train_idx, valid_idx = df.loc[df['fold'] != fold].index, df.loc[df['fold'] == fold].index X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx] y_train, y_valid = y.iloc[train_idx], y.iloc[valid_idx] model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], **self.training_params['fit_params']) fi_tmp = pd.DataFrame() fi_tmp['feature'] = X_train.columns fi_tmp['importance'] = model.feature_importances_ fi_tmp['fold'] = fold fi_tmp['seed'] = self.config['seed'] xgb_importances = xgb_importances.append(fi_tmp) xgb_oof[valid_idx] = model.predict(X_valid) score = macro_f1(y.iloc[valid_idx], xgb_oof[valid_idx]) xgb_oof_score.append(score) model_save_path = model_save_dir / f'model_f{fold}_best.pkl' with open(model_save_path, 'wb') as f: pickle.dump(model, f, pickle.HIGHEST_PROTOCOL) elapsed = time.time() - start_time self.logger.info( f'[Fold {fold}] valid_macro_f1 : {score:.6f} | time : {elapsed:.0f}s') self.logger.info( f"[Fold {fold}] best model saved : {model_save_path}") self.logger.info('-'*100) self.logger.info( f'Average best valid_macro_F1 Score: {np.mean(xgb_oof_score):.6f}') del model gc.collect() def inference(self, df_test): xgb_preds = np.zeros((df_test.shape[0], )) tabular_features = self.config['tabular_features'] X_test = df_test[tabular_features] for fold in range(self.config['n_folds']): start_time = time.time() model_save_path = settings.MODEL / \ self.model_params['model_save_dir'] / f'model_f{fold}_best.pkl' model = pickle.load(open(model_save_path, 'rb')) xgb_preds += model.predict_proba(X_test)[:, 1] / \ self.config['n_folds'] elapsed = time.time() - start_time self.logger.info( f'[model_f{fold}_best] inference time : {elapsed:.0f}s') del model gc.collect() xgb_preds = np.expand_dims(xgb_preds, axis=1) preds_save_path = settings.MODEL / \ self.model_params['model_save_dir'] / f'preds.npy' np.save(preds_save_path, xgb_preds) self.logger.info( f'Prediction result saved : {preds_save_path}') def save_oof(self, df): xgb_oof = np.zeros((df.shape[0], )) xgb_oof_score = [] tabular_features = self.config['tabular_features'] target = self.training_params['target'] X = df[tabular_features] y = df[target] for fold in range(self.config['n_folds']): start_time = time.time() model_save_path = settings.MODEL / \ self.model_params['model_save_dir'] / f'model_f{fold}_best.pkl' model = pickle.load(open(model_save_path, 'rb')) valid_idx = df.loc[df['fold'] == fold].index X_valid = X.iloc[valid_idx] xgb_oof[valid_idx] = model.predict_proba(X_valid)[:, 1] score = macro_f1(y.iloc[valid_idx], np.where( xgb_oof[valid_idx] > 0.5, 1, 0)) 
xgb_oof_score.append(score) elapsed = time.time() - start_time self.logger.info( f'[model_f{fold}_best] valid_macro_f1 : {score:.6f} | time : {elapsed:.0f}s') del model gc.collect() xgb_oof = np.expand_dims(xgb_oof, axis=1) oof_save_path = settings.MODEL / \ self.model_params['model_save_dir'] / f'oof.npy' np.save(oof_save_path, xgb_oof) self.logger.info( f'Validation result saved : {oof_save_path}')
repo_name: lim-hyo-jeong/DACON-Breast-Cancer | sub_path: xgb_trainer.py | file_name: xgb_trainer.py | file_ext: py | file_size_in_byte: 5,105 | program_lang: python | lang: en | doc_type: code | stars: 4 | dataset: github-code | pt: 6
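A hedged sketch of the config dictionary BCXGBTrainer expects; every key is inferred from the attribute accesses in the class above (n_folds, seed, tabular_features, model_params.model_save_dir, training_params.target/best_params/fit_params), and all concrete values are placeholders.

config = {
    'n_folds': 5,
    'seed': 42,
    'tabular_features': ['feat_a', 'feat_b'],   # placeholder column names
    'model_params': {
        'model_save_dir': 'xgb_run1',           # models land under settings.MODEL / this dir
    },
    'training_params': {
        'target': 'label',                      # placeholder target column
        'best_params': {'n_estimators': 500},   # forwarded to XGBClassifier(...)
        'fit_params': {'verbose': 100},         # forwarded to XGBClassifier.fit(...)
    },
}
# trainer = BCXGBTrainer(config, logger)
# trainer.train_and_validate(df)  # df must also carry a 'fold' column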
seq_id: 36030628166
"""Countdown/Stopwatch functionalities.""" import subprocess import threading import time import traceback from abc import ( ABC, abstractmethod, ) from pathlib import Path from typing import ( List, Optional, Union, ) from overrides import overrides import albert as v0 import gi # isort:skip gi.require_version("Notify", "0.7") # isort:skip from gi.repository import ( GdkPixbuf, Notify, ) # isort:skip __title__ = "Countdown/Stopwatch functionalities" __version__ = "0.4.0" __triggers__ = "clock " __authors__ = "Nikos Koukis" __homepage__ = ( "https://github.com/bergercookie/awesome-albert-plugins/blob/master/plugins/clock" ) countdown_path = str(Path(__file__).parent / "countdown.png") stopwatch_path = str(Path(__file__).parent / "stopwatch.png") sound_path = Path(__file__).parent.absolute() / "bing.wav" cache_path = Path(v0.cacheLocation()) / "clock" config_path = Path(v0.configLocation()) / "clock" data_path = Path(v0.dataLocation()) / "clock" dev_mode = True # plugin main functions ----------------------------------------------------------------------- def play_sound(num): for x in range(num): t = threading.Timer(0.5 * x, lambda: subprocess.Popen(["cvlc", sound_path,]),) t.start() def notify( app_name: str, msg: str, image=None, ): Notify.init(app_name) n = Notify.Notification.new(app_name, msg, image) n.show() def format_time(t: float): """Return the string representation of t. t must be in *seconds*""" if t >= 60: return f"{round(t / 60.0, 2)} mins" else: return f"{round(t, 2)} secs" def play_icon(started) -> str: return "▶️" if started else "⏸️" class Watch(ABC): def __init__(self, name): self._name = name if name is not None else "" self._to_remove = False def name(self,) -> Optional[str]: return self._name @abstractmethod def start(self): pass def started(self) -> bool: pass return self._started @abstractmethod def pause(self): pass @abstractmethod def notify(self): pass def to_remove(self,) -> bool: return False class Stopwatch(Watch): def __init__(self, name=None): super(Stopwatch, self).__init__(name=name) self.total_time = 0 self.latest_start = 0 self._started = False self.latest_stop_time = 0 @overrides def start(self): self.latest_start = time.time() self._started = True self.notify(msg=f"Stopwatch [{self.name()}] starting") @overrides def pause(self): stop_time = time.time() self.total_time += stop_time - self.latest_start self._started = False self.notify( msg=f"Stopwatch [{self.name()}] paused, total: {format_time(self.total_time)}" ) self.latest_stop_time = stop_time @overrides def notify(self, msg): notify( app_name="Stopwatch", msg=msg, image=stopwatch_path, ) @classmethod def icon(cls): return stopwatch_path def destroy(self): pass def __str__(self): # current interval if self.started(): latest = time.time() else: latest = self.latest_stop_time current_interval = latest - self.latest_start total = self.total_time + current_interval s = get_as_subtext_field(play_icon(self._started)) s += get_as_subtext_field(self.name()) s += get_as_subtext_field(format_time(total), "Total",) s += get_as_subtext_field(format_time(current_interval), "Current Interval",)[:-2] return s class Countdown(Watch): def __init__( self, name: str, count_from: float, ): super(Countdown, self).__init__(name=name) self.latest_start = 0 self.remaining_time = count_from self._started = False self.timer = None @overrides def start(self): self._started = True self.latest_start = time.time() self.timer = threading.Timer(self.remaining_time, self.time_elapsed,) self.timer.start() self.notify( 
msg=f"Countdown [{self.name()}] starting, remaining: {format_time(self.remaining_time)}" ) @overrides def pause(self): self._started = False self.remaining_time -= time.time() - self.latest_start if self.timer: self.timer.cancel() self.notify( msg=f"Countdown [{self.name()}] paused, remaining: {format_time(self.remaining_time)}" ) def time_elapsed(self): self.notify(msg=f"Countdown [{self.name()}] finished") play_sound(1) self._to_remove = True @classmethod def icon(cls): return countdown_path def destroy(self): self.timer.cancel() self.notify(msg=f"Cancelling [{self.name()}]") @overrides def notify(self, msg): notify( app_name="Countdown", msg=msg, image=countdown_path, ) def __str__(self): s = get_as_subtext_field(play_icon(self._started)) s += get_as_subtext_field(self.name()) # compute remaining time remaining_time = self.remaining_time if self.started(): remaining_time -= time.time() - self.latest_start s += f"Remaining: {format_time(remaining_time)}" return s countdowns: List[Countdown] = [] stopwatches: List[Stopwatch] = [] def all_watches() -> List[Union[Countdown, Stopwatch]]: return [ *countdowns, *stopwatches, ] def create_stopwatch(name, *query_parts): stopwatches.append(Stopwatch(name=name)) stopwatches[-1].start() def create_countdown(name, *query_parts): t = float(query_parts[0].strip()) * 60 countdowns.append(Countdown(name=name, count_from=t,)) countdowns[-1].start() def delete_item(item: Union[Stopwatch, Countdown]): item.destroy() # TODO: could be neater.. if isinstance(item, Stopwatch): stopwatches.remove(item) else: countdowns.remove(item) def initialize(): """Called when the extension is loaded (ticked in the settings) - blocking.""" # create plugin locations for p in ( cache_path, config_path, data_path, ): p.mkdir( parents=False, exist_ok=True, ) def finalize(): pass def handleQuery(query,) -> list: """Hook that is called by albert with *every new keypress*.""" # noqa results = [] if query.isTriggered: try: query.disableSort() results_setup = setup(query) if results_setup: return results_setup query_parts = query.string.strip().split() name = None if query_parts: name = query_parts.pop(0) subtext = f'Name: {name if name else "Not given"}' results.extend( [ v0.Item( id=__title__, icon=countdown_path, text="Create countdown", subtext=f'{subtext}{" - <u>Please provide a duration</u>" if not query_parts else ""}', completion=__triggers__, actions=[ v0.FuncAction( "Create countdown", lambda name=name, query_parts=query_parts: create_countdown( name, *query_parts, ), ) ], ), v0.Item( id=__title__, icon=stopwatch_path, text="Create stopwatch", subtext=subtext, completion=__triggers__, actions=[ v0.FuncAction( "Create stopwatch", lambda name=name, query_parts=query_parts: create_stopwatch( name, *query_parts, ), ) ], ), ] ) # cleanup watches that are done for li in [ countdowns, stopwatches, ]: for watch in li: if watch.to_remove(): li.remove(watch) results.extend([get_as_item(item) for item in all_watches()]) except Exception: # user to report error if dev_mode: # let exceptions fly! print(traceback.format_exc()) raise results.insert( 0, v0.Item( id=__title__, icon=countdown_path, text="Something went wrong! 
Press [ENTER] to copy error and report it", actions=[ v0.ClipAction( f"Copy error - report it to {__homepage__[8:]}", f"{traceback.format_exc()}", ) ], ), ) return results # supplementary functions --------------------------------------------------------------------- def get_as_item(item: Union[Countdown, Stopwatch]): """Return an item - ready to be appended to the items list and be rendered by Albert.""" actions = [v0.FuncAction("Remove", lambda: delete_item(item),)] if item.started(): actions.append(v0.FuncAction("Pause", lambda: item.pause(),)) else: actions.append(v0.FuncAction("Resume", lambda: item.start(),)) return v0.Item( id=__title__, icon=countdown_path if isinstance(item, Countdown) else stopwatch_path, text=str(item), subtext="", completion=__triggers__, actions=actions, ) def get_as_subtext_field(field, field_title=None) -> str: """Get a certain variable as part of the subtext, along with a title for that variable.""" s = "" if field: s = f"{field} | " else: return "" if field_title: s = f"{field_title}: " + s return s def save_data(data: str, data_name: str): """Save a piece of data in the configuration directory.""" with open(config_path / data_name, "w",) as f: f.write(data) def load_data(data_name,) -> str: """Load a piece of data from the configuration directory.""" with open(config_path / data_name, "r",) as f: data = f.readline().strip().split()[0] return data def setup(query): """Setup is successful if an empty list is returned. Use this function if you need the user to provide you data """ results = [] return results
repo_name: ppablocruzcobas/Dotfiles | sub_path: albert/clock/__init__.py | file_name: __init__.py | file_ext: py | file_size_in_byte: 11,096 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6
seq_id: 28313903181
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QMenuBar, QAction, QTextEdit, QHBoxLayout, QWidget, QFontDialog, QColorDialog, QFileDialog, QDialog, QVBoxLayout, QMessageBox from PyQt5 import QtGui, QtCore from PyQt5.QtGui import QIcon from PyQt5.QtPrintSupport import QPrinter, QPrintDialog, QPrintPreviewDialog from PyQt5.QtCore import QFileInfo import sys class Window(QMainWindow): # Klasse Fenster def __init__(self): super().__init__() self.title = ('Einfacher Text Editor mit PDF Funktion') # Window Title self.top = 400 # self.left = 600 # Abstand self.width = 400 # self.height = 300 # self.iconName = 'win.png' #Icon self.setWindowIcon(QIcon(self.iconName)) self.setWindowTitle(self.title) self.setGeometry(self.left, self.top, self.width, self.height) self.createEditor() # Anzeigen von Editor self.CreateMenu() # Anzeigen von der Menü Bar self.show() #--------------------------------- M e n ü B a r -------------------------------# def CreateMenu(self): mainMenu = self.menuBar() fileMenu = mainMenu.addMenu("Datei") editMenu = mainMenu.addMenu("Bearbeiten") infoMenu = mainMenu.addMenu("Info") helpAction = QAction(QtGui.QIcon(""), 'Help', self) helpAction.setShortcut("") helpAction.triggered.connect(self.helpAction) infoMenu.addAction(helpAction) # Öffnen openAction = QAction(QIcon("open.png"), 'Öffnen', self) openAction.setShortcut("") openAction.triggered.connect(self.openAction) fileMenu.addAction(openAction) # Öffnen saveAction = QAction(QIcon("save.png"), 'Speichern unter', self) saveAction.setShortcut("") saveAction.triggered.connect(self.saveAction) fileMenu.addAction(saveAction) # Speichern printAction = QAction(QIcon("print.png"), 'Drucken', self) printAction.setShortcut("") printAction.triggered.connect(self.printDialog) fileMenu.addAction(printAction) # Drucken printpreviewAction = QAction(QIcon("preprint.png"), 'Druckvorschau', self) printpreviewAction.triggered.connect(self.printPreviewDialog) fileMenu.addAction(printpreviewAction) # Vorschau Druck pdfAction = QAction(QIcon("pdf.png"), 'PDF Exportieren', self) pdfAction.triggered.connect(self.pdfExport) fileMenu.addAction(pdfAction) # Vorschau Druck exitAction = QAction(QIcon("exit.png"), 'Beenden', self) exitAction.setShortcut("") exitAction.triggered.connect(self.exitWindow) fileMenu.addAction(exitAction) # Beenden editAction = QAction(QIcon("edit.png"), 'Schrift', self) editAction.setShortcut("") editAction.triggered.connect(self.fontDialog) editMenu.addAction(editAction) # Bearbeiten colorAction = QAction(QIcon("color.png"), 'Schrift Farbe', self) # Schrift Farbe colorAction.triggered.connect(self.colorDialog) editMenu.addAction(colorAction) #------------------------ Exit Button funktion ----------------------------------# def exitWindow(self): self.close() #-------------------------Text Editor---------------------------------------------# def createEditor(self): self.textEdit = QTextEdit(self) self.setCentralWidget(self.textEdit) #------------------------Schrift Dialog------------------------------------------# def fontDialog(self): font, ok = QFontDialog.getFont() if ok: self.textEdit.setFont(font) #----------------------- Schrift Farbe Dialog ----------------------------------# def colorDialog(self): color = QColorDialog.getColor() self.textEdit.setTextColor(color) #----------------------------Drucken der Datei---------------------------------# def printDialog(self): printer = QPrinter(QPrinter.HighResolution) dialog = QPrintDialog(printer, self) if dialog.exec_() == QPrintDialog.Accepted: 
self.textEdit.print_(printer) #--------------------------Druck Vorschau---------------------------------------# def printPreviewDialog(self): printer = QPrinter(QPrinter.HighResolution) previewDialog = QPrintPreviewDialog(printer, self) previewDialog.paintRequested.connect(self.printPreview) previewDialog.exec_() def printPreview(self, printer): self.textEdit.print_(printer) #-------------------------PDF Exporter-----------------------------------------# def pdfExport(self): fn, _= QFileDialog.getSaveFileName(self, "Export PDF", None, "PDF files (.pdf);;All Files()") if fn != '': if QFileInfo(fn).suffix() == "" :fn += '.pdf' printer = QPrinter(QPrinter.HighResolution) printer.setOutputFormat(QPrinter.PdfFormat) printer.setOutputFileName(fn) self.textEdit.document ().print_(printer) #-------------------------------Datei Laden------------------------------------# def openAction(self): fname = QFileDialog.getOpenFileName(self, 'Open file', '/home') if fname[0]: f = open(fname[0], 'r') with f: data = f.read() self.textEdit.setText(data) #------------------------------Datei Speichern---------------------------------# def saveAction(self): filename, _ = QFileDialog.getSaveFileName(self, 'Datei Speichern', ".txt", "Alle Datein (*);; Text Datei (*.txt)") if filename: with open(filename, "w") as file: file.write(self.textEdit.toPlainText()) file.close() #-----------------------------Message Box-------------------------------------# def helpAction(self): QMessageBox.about(self, "Entwickelt mit QT5", "Alpha 1.0") #------------------------------Ende-------------------------------------------# App = QApplication(sys.argv) Window = Window() sys.exit(App.exec_())
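The PDF path in pdfExport() above can be exercised on its own. Below is a minimal, hedged sketch of the same idea: a QTextDocument printed through a QPrinter configured for PDF output. The sample text and the output file name are placeholders, not values from the repository.

# Minimal sketch: render plain text to a PDF with Qt's print support,
# mirroring the pdfExport() approach above (requires PyQt5).
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QTextDocument
from PyQt5.QtPrintSupport import QPrinter

app = QApplication(sys.argv)                 # a QApplication must exist before Qt print classes are used

doc = QTextDocument()
doc.setPlainText("Hello from the sketch.")   # illustrative content

printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)  # write a PDF file instead of sending to a device
printer.setOutputFileName("example.pdf")     # hypothetical output path

doc.print_(printer)                          # same call the editor makes on its QTextEdit document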
Repository: schnuppi1984/Easy-Text-Editor · Path: start.py · Language: python (en) · Size: 6,702 · Stars: 0 · Dataset: github-code

github-code record 25159533855
import psycopg2 import random con=psycopg2.connect('dbname=ecommerce_db user=postgres port=5432 host=localhost password=Murad2004') cur=con.cursor() def show(cursor): cur.execute(query) length = 30 print(*[desc[0].ljust(30) for desc in cursor.description], sep='') print('-'*140) result = cur.fetchall() for row in result: for col in row: print(str(col).ljust(length)[:37], end='') print() # query=""" # CREATE TABLE seller( # id SERIAL PRIMARY KEY, # name VARCHAR(50) # ); # CREATE TABLE product( # id SERIAL PRIMARY KEY, # title VARCHAR(50) NOT NULL, # price NUMERIC NOT NULL, # seller_id INT, # CONSTRAINT fk_seller # FOREIGN KEY(seller_id) # REFERENCES seller(id) # ON DELETE CASCADE # ); # CREATE TABLE tag( # id SERIAL PRIMARY KEY, # title VARCHAR(50) NOT NULL # ); # CREATE TABLE customer( # id SERIAL PRIMARY KEY, # name VARCHAR(50) # ); # CREATE TABLE wishlist( # id SERIAL PRIMARY KEY, # customer_id INT, # CONSTRAINT fk_customer # FOREIGN KEY(customer_id) # REFERENCES customer(id) # ON DELETE CASCADE # ); # CREATE TABLE wishlist_products( # id SERIAL PRIMARY KEY, # product_id INT, # customer_id INT, # CONSTRAINT fk_customer # FOREIGN KEY(customer_id) # REFERENCES customer(id) # ON DELETE CASCADE, # CONSTRAINT fk_product # FOREIGN KEY(product_id) # REFERENCES product(id) # ON DELETE CASCADE # ); # CREATE TABLE review( # id SERIAL PRIMARY KEY, # rate NUMERIC, # customer_id INT, # product_id INT, # CONSTRAINT fk_customer # FOREIGN KEY(customer_id) # REFERENCES customer(id) # ON DELETE SET NULL, # CONSTRAINT fk_product # FOREIGN KEY(product_id) # REFERENCES product(id) # ON DELETE CASCADE # ); # CREATE TABLE product_tags( # id SERIAL PRIMARY KEY, # product_id INT, # tag_id INT, # CONSTRAINT fk_product_tag # FOREIGN KEY(product_id) # REFERENCES product(id) # ON DELETE CASCADE, # CONSTRAINT fk_tag_product # FOREIGN KEY(tag_id) # REFERENCES tag(id) # ON DELETE CASCADE # ); # """ customer_data=[{ "name": "Halette Milberry" }, { "name": "Barby Wastell" }, { "name": "Lexie Dragon" }, { "name": "Rosamond Kynston" }, { "name": "Christen Keyson" }, { "name": "Madeline Knottley" }, { "name": "Ruby Loachhead" }, { "name": "Aeriel Knowlden" }, { "name": "Hedy Phillipp" }, { "name": "Harmonia Freckelton" }, { "name": "Rossy Mustchin" }, { "name": "Dulcie Higgonet" }, { "name": "Kala Caldroni" }, { "name": "Nessie Lavery" }, { "name": "Shanta Polotti" }, { "name": "Berty Dampier" }, { "name": "Frans Fosdike" }, { "name": "Lotty Corkhill" }, { "name": "Randie Lawther" }, { "name": "Husain Reye" }, { "name": "Fayre McPhillimey" }, { "name": "Susette Raitie" }, { "name": "Sela Elsmore" }, { "name": "Taddeo Enterlein" }, { "name": "Valma Hutchence" }, { "name": "Micki Gorelli" }, { "name": "Arabelle Najera" }, { "name": "Annemarie Crenage" }, { "name": "Nara Whight" }, { "name": "Borg Downage" }, { "name": "Sheri Moreman" }, { "name": "Hew Dignum" }, { "name": "Jacquenette Caygill" }, { "name": "Margot Cradduck" }, { "name": "Adele Snassell" }, { "name": "Caryl Pevsner" }, { "name": "Gannon Northrop" }, { "name": "Artemas Goodlip" }, { "name": "Lawrence Crockatt" }, { "name": "Sheelagh Cosely" }, { "name": "Doralyn Tripett" }, { "name": "Grove Learman" }, { "name": "Rosanna Pretious" }, { "name": "Earle Sapshed" }, { "name": "Guido Onyon" }, { "name": "Rolfe Panner" }, { "name": "Hilly Dashwood" }, { "name": "Orland Shutt" }, { "name": "Kipp Blacksell" }, { "name": "Umberto Chaman" }] # query=""" # INSERT INTO customer(name) VALUES(%s); # """ # for i in customer_data: # cur.execute(query,(i['name'],)) # 
query="SELECT * FROM customer" seller_data=[ { "name": "Si Friary" }, { "name": "Scotty Ludlem" }, { "name": "Randa Ifill" }, { "name": "Vanessa Fay" }, { "name": "Tamarra Tossell" }, { "name": "Kennett Dumper" }, { "name": "Jessika Stienham" }, { "name": "Perry Branscombe" }, { "name": "Salaidh Schultz" }, { "name": "Nicolis Stonman" }, { "name": "Michale Brecknock" }, { "name": "Marian Withinshaw" }, { "name": "Lynea Benit" }, { "name": "Cale Giacometti" }, { "name": "Ave Jahnisch" }, { "name": "Aurelea Adshed" }, { "name": "Pavlov Borham" }, { "name": "Lamont McCanny" }, { "name": "Rustie Troyes" }, { "name": "Ivory Vina" }] # query=""" # INSERT INTO seller(name) VALUES(%s); # """ # for i in seller_data: # cur.execute(query,(i["name"],)) # query="SELECT * FROM seller" # cur.execute(query) tag_data=[ { "title": "Cheese" }, { "title": "Chocolate" }, { "title": "Vanillia" }, { "title": "Vegetable" }, { "title": "Vegan" }, { "title": "Healthy" }, { "title": "Fit" }, { "title": "Meal" }, { "title": "Fast Food" } ] # query=""" # INSERT INTO tag(title) VALUES(%s); # """ # for i in tag_data: # cur.execute(query,(i['title'],)) # query='SELECT * FROM tag' seller_ids=[] for i in range(len(seller_data)): seller_ids.append(i+1) product_data=[ { "title": "M&M Food Market", "price": "17.0616609356653" }, { "title": "Soprole", "price": "11.6234613464323" }, { "title": "Kinder", "price": "2.62073436454904" }, { "title": "Andy Capp's fries", "price": "14.6864611770429" }, { "title": "Bewley's", "price": "7.01804420073426" }, { "title": "Vitta Foods", "price": "4.5093621385793" }, { "title": "Taco Bell", "price": "19.1318949810843" }, { "title": "Sun-Pat", "price": "9.6603184191791" }, { "title": "Baskin robbins", "price": "16.105171543595" }, { "title": "Wendy's", "price": "5.43620887838128" }, { "title": "Cobblestone", "price": "7.22419333514953" }, { "title": "Wonder Bread", "price": "14.6278888390529" }, { "title": "Lavazza", "price": "10.305469252777" }, { "title": "Kinder", "price": "19.4697343713929" }, { "title": "Soprole", "price": "16.3448767300439" }, { "title": "Nabisco", "price": "2.48867588838966" }, { "title": "Tic Tac", "price": "2.60812248457601" }, { "title": "Magnum", "price": "19.4421954995218" }, { "title": "Papadopoulos", "price": "19.4472127819654" }, { "title": "Wonder Bread", "price": "12.7520409541913" }, { "title": "Papadopoulos", "price": "1.811215852765" }, { "title": "Olymel", "price": "7.34511601847835" }, { "title": "Domino", "price": "7.64364533249459" }, { "title": "Pizza Hut", "price": "12.6648227300797" }, { "title": "Red Lobster", "price": "10.0007594130005" }, { "title": "Andy Capp's fries", "price": "18.5981898673802" }, { "title": "Secret Recipe", "price": "18.6991437984161" }, { "title": "Sun-Pat", "price": "3.15631274094633" }, { "title": "Magnum", "price": "10.3542353042188" }, { "title": "Heinz", "price": "17.7369680049536" }, { "title": "Olymel", "price": "19.9154627821015" }, { "title": "Taco Bell", "price": "10.9514749045258" }, { "title": "Dunkin' Donuts", "price": "11.479457990024" }, { "title": "Applebee's", "price": "15.7718961763996" }, { "title": "Knorr", "price": "10.4961827092321" }, { "title": "KFC", "price": "12.4794360452702" }, { "title": "Domino", "price": "17.0641279993877" }, { "title": "Knorr", "price": "2.66790023197788" }, { "title": "Kits", "price": "18.8862874209351" }, { "title": "Dunkin' Donuts", "price": "7.84475450163929" }, { "title": "Applebee's", "price": "13.4456292886499" }, { "title": "Nutella", "price": "4.63776473637566" }, { 
"title": "Bewley's", "price": "13.0057596485157" }, { "title": "Kits", "price": "1.38640394266062" }, { "title": "Nesquik", "price": "6.1496629436266" }, { "title": "KFC", "price": "15.6723103028128" }, { "title": "Andy Capp's fries", "price": "17.8805946269448" }, { "title": "Tic Tac", "price": "7.01679017348997" }, { "title": "Andy Capp's fries", "price": "7.87038087466284" }, { "title": "Bel Group", "price": "10.6127773935966" } ] # query=""" # INSERT INTO product(title,price,seller_id) VALUES(%s,%s,%s); # """ # for i in product_data: # cur.execute(query,(i['title'],i['price'],random.choice(seller_ids))) # query="SELECT * FROM product" customers_ids=[] for i in range(len(customer_data)): customers_ids.append(i+1) # query=""" # INSERT INTO wishlist(customer_id) VALUES(%s); # """ # for i in customer_data: # cur.execute(query,(random.choice(customers_ids),)) # query="SELECT * FROM wishlist" # rate NUMERIC, # # customer_id INT, # # product_id INT, # query=""" # INSERT INTO review(rate,customer_id,product_id) VALUES(%s,%s,%s); # """ # for i in customer_data: # cur.execute(query,(random.randint(1,5),random.choice(customers_ids),random.randint(1,len(product_data)))) # query='SELECT * FROM review' # product_id INT, # # customer_id INT, # query=""" # INSERT INTO wishlist_products(product_id,customer_id) VALUES(%s,%s); # """ # for i in customer_data: # cur.execute(query,(random.randint(1,len(product_data)),random.choice(customers_ids))) # query='SELECT * FROM wishlist_products' # query=""" # INSERT INTO product_tags(product_id,tag_id) VALUES(%s,%s); # """ # for i in product_data: # cur.execute(query,(random.randint(1,len(product_data)),random.randint(1,len(tag_data)))) # query='SELECT * FROM product_tags' # query=""" # SELECT * # FROM product_tags pt # LEFT JOIN tag t ON pt.tag_id = t.id # WHERE pt.product_id = 5; # """ # # query='SELECT * FROM product' # query=""" # SELECT * # FROM product # LEFT JOIN seller ON product.seller_id = seller.id # WHERE seller.id = 5; # """ # query=""" # SELECT * # FROM wishlist_products # LEFT JOIN product ON wishlist_products.product_id = product.id # WHERE wishlist_products.customer_id = 2; # """ # query=""" # SELECT p.id, p.title # FROM product p # LEFT JOIN review r ON p.id = r.product_id # GROUP BY p.id, p.title # ORDER BY rate DESC # LIMIT 10; # """ # # query=''' # # SELECT * FROM review LEFT JOIN product ON product_id=product.id WHERE product_id=2 ; # # ''' # # WHERE product_id IN (SELECT AVG(rate) FROM review GROUP BY product_id ORDER BY AVG(rate) DESC) # # query="SELECT * FROM product" #Burdan basliyir # Bir teq seçin və həmin teqin məhsullarını göstərin query="""SELECT * FROM product LEFT JOIN product_tags on product_tags.product_id=product.id WHERE product_tags.tag_id=5""" # Bir məhsul seçin və həmin məhsulların teqlərini göstərin # query="""SELECT * FROM product_tags # LEFT JOIN product on product.id=product_tags.product_id WHERE product.id=5 # """ # Bir satıcı seçin və həmin satıcının məhsullarını göstərin # query=""" # SELECT * FROM product # LEFT JOIN seller on seller.id=product.seller_id WHERE seller.id=5 # """ # Bir müştəri seçin və həmin müştərinin wishlistindəki məhsulları göstərin # query=""" # SELECT * FROM wishlist_products # LEFT JOIN customer on wishlist_products.customer_id=customer.id WHERE customer.id=45 # """ # Review ortalaması ən yüksək olan 10 məhsulu həmin ortalama ilə birlikdə göstərin # query="""SELECT AVG(rate),product.id FROM product # LEFT JOIN review on product.id=review.product_id GROUP BY product.id ORDER BY AVG(rate) LIMIT 
10""" # teqləri məhsullarının sayına görə düzün və bunu edərkən də məhsulların sayı da görünsün # query=""" # SELECT COUNT(product_tags.product_id),product_tags.tag_id FROM product_tags LEFT JOIN tag on product_tags.tag_id=tag.id GROUP BY product_tags.tag_id ORDER BY COUNT(product_tags.product_id) DESC # """ # Wishlistindəki məhsulların toplam qiyməti ən çox olan 10 müşətirini göstərin. Bunu edərkən həmin qiymət toplamı da görünsün # query=""" # SELECT customer.id,SUM(wishlist_products.product_id) FROM customer LEFT JOIN wishlist_products on customer.id=wishlist_products.customer_id GROUP BY customer.id HAVING SUM(wishlist_products.product_id) IS NOT NULL ORDER BY SUM(wishlist_products.product_id) DESC LIMIT 10 # """ # id-lərinə görə ilk 10 satıcının məlumatlarını və həmin satıcının məhsullarına gələn reviewların ortalamasını göstərin query=""" SELECT customer.id, AVG(rate) FROM customer LEFT JOIN review on customer.id=review.customer_id GROUP BY customer.id HAVING AVG(rate) IS NOT NULL ORDER BY AVG(rate) DESC LIMIT 10 """ show(cur) con.commit()
Repository: MuradAsadzade/Postresql-join-tasks · Path: ecommerce.py · Language: python (en) · Size: 13,617 · Stars: 0 · Dataset: github-code

github-code record 21322953683
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Camel: A dos game ported to a cross platform solution. Was originally: Camel Source Code for the BrailleNote, written in Rapid Euphoria Original author: Louis Bryant Modified by Nathaniel Schmidt <[email protected]> Date modified: 09/09/2020; 23/01/2021; 03/02/2022 You have permission to modify and redistribute this code and software with or without changes. Pulse Data International, Rapid Deployment Software, Programmers of other included files, and I take no responsibility for damages you cause by your modifying this software. This code and software is provided 'as is' without any implied or express warranty. """ from random import randint from time import sleep # First, let's declare some global variables - bad practice but easier when translating from such a basic language such as euphoria: you = 0 # Where you are. hyenas = 0 # The hyenas location. drinks = 0 # How many drinks you have left. gocommands = 0 # How many commands you have before you need another drink. days = 0 # How many good days your camel has left. n = 0 # Temporary random number usages. mainInput = None # Stores the user presses here. gameLost = False# Whether you have lost, mainly for the printLoss function and main game loop. def queryInstructions (prompt): """Ask the user whether they want instructions, then recursively query the user for instructions until the user declines. @param prompt: The output prompt containing the query for the user to answer. @type prompt: str""" global mainInput instructions = """Welcome to the game of Camel. The object of the game is to travel 200 miles across the Great Desert. A pack of nasty, ravenous hyenas will be chasing you. You will be asked for commands every so often. C O M M A N D S: 1 -- drink from your canteen, 2 -- move ahead moderate speed, 3 -- move ahead fast speed, 4 -- stop for a rest, 5 -- status check, 6 -- hope for help, 7 -- exit, And 8 -- request help to list available commands. You will get a quart of water which will last you six drinks. You must renew your water supply at an Oases completely. You get a half quart if found by help. If help does not find you after command '6', you lose.""" mainInput = input(prompt) mainInput = mainInput.upper () while mainInput != "Y" and mainInput != "N": print ("Please enter either 'y' or 'n'") mainInput = input("Would you like instructions? Type Y for yes or N for no. ") mainInput = mainInput.upper () if mainInput == 'Y': print (instructions) queryInstructions ("Would you like to hear the instructions again? Type Y for yes or N for no.") else: print("Good luck and good cameling! ") # Now, let's initialize the variables: def init(): """Initialise global variable identifiers with required initial value assignments to allow the game to start.""" global you global hyenas global drinks global gocommands global days you = 0 # You haven't gone anywhere. hyenas = 25 # The hyenas are 25 miles ahead of you. drinks = 6 # You have six drinks left in your canteen. gocommands = 4 # You have 4 commands without drinking. days = 7 # Your camel has 7 good days left. def printLoss (): """Print a random loss message from a randomised selection.""" global n n = randint(1, 4) # We have four loser statements. print("Your body and soul lay a rest in the sand. ") if n == 1: # This is the first loser statement. print("The National's Camel Union is not attending your funeral!!!!!! ") elif n == 2: # This is the second loser statement. print("Your body was eaten by voltures and hyenas!!!!!! 
") elif n == 3: # This is the fourth loser statement. print("People with little inteligence should stay out of the desert. ") elif n == 4: # This is the last loser statement. print("Turkeys should fly, not ride camels. ") # No more loser statements. def queryReplay (): """Ask whether to play the game again or exit.""" global gameLost global mainInput if gameLost == True: printLoss () mainInput = input ("Want another camel and a new game? (Pres Y for yes or N for no) ") mainInput = mainInput.upper () while mainInput != 'Y' and mainInput != 'N': print ("Please enter either 'Y' or 'N'") mainInput = input ("Want another game? (Pres Y for yes or N for no) ") mainInput = mainInput.upper () if mainInput == 'Y': gameLost = False main () else: print ("Chicken!") exit () def gameStatus (): """Figure out what to do based on the current state of global vars.""" global you global hyenas global drinks global gocommands global days global gameLost # Check where you are before letting you proceed. # Did you win? Or did the hyenas capture you? # Or, maybe, you are still alive. if you > 199: # You made it! print("YOU WIN! A party is given in your honor! ") print("The hyenas have been tamed and are planning to attend. ") queryReplay () if you > hyenas: # You are ahead of the hyenas. # Let them move. hyenas += randint(1, 20) # Move at a random speed. if hyenas >= you and you >30: print("THE hyenas HAVE CAPTURED YOU!") print ("CAMEL AND PEOPLE SOUP IS THEIR FAVORITE DISH. ") gameLost = True queryReplay () if gocommands < 3: # You had better get a drink. print("W A R N I N G -- GET A DRINK ") if gocommands < 0: # Too many commands without drinking. print("YOU RAN OUT OF WATER... SORRY CHUM!!!!!! ") gameLost = True queryReplay () # What about your camel? if days < 1: # You ran your camel to death! print("YOU DIRTY LOUSY RAP SCALLION!!! ") print("YOU RAN YOUR INNOCENT CAMEL TO DEATH! ") gameLost = True queryReplay () # Well? Let's continue! if you == 0: # You are just starting. print("You are in the middle of the desert at an oases. ") if you > 25: hyenas += randint(1, 10) print("The hyenas are {0} miles behind you.".format(you-hyenas)) print("You have travelled {0} miles altogether, and have {1} more miles to go.".format (you, 200-you)) # Now let's start the game. def main (): """Main procedure for the game.""" global you global hyenas global drinks global gocommands global days global n global gameLost global mainInput print("Welcome to The Game Of Camel. ") queryInstructions ("Would you like to hear game instructions? Type Y for yes or N for no.") init() # Call the function to initialize the variables. gameStatus () while gameLost != True: while True: try: mainInput = int(input("Your command?")) break except ValueError: print ("Make sure you only enter a number.") continue if mainInput == 1: # Have a drink # Drink from your canteen. if drinks == 0: print("YOU RAN OUT OF WATER. SORRY CHUM!!!!!! ") gameLost = True queryReplay () else: # Get a drink? drinks -= 1 print("BETTER WATCH FOR A OASIS. ") gocommands = 4 # Reset how many commands you can go before drinking. gameStatus () elif mainInput == 2: # Walk normally. you += randint(1, 5) # Move randomly from 1 to 5 miles. days -= 1 # Subtract one day from the camel. print("Your camel likes this pace! ") gocommands -= 1 # Subtract commands you have before drinking. gameStatus () elif mainInput == 3: # So try to run! gocommands -= 1 # You wasted one more command. gameStatus () n = randint(1, 4) # What happens here? # Let's see. 
if n == 1: # The computer chose the first action. # The first action is a sand-storm. print("YOU HAVE BEEN CAUGHT IN A SAND-STORM... ") print("GOOD LUCK! ") you += randint(1, 5) # Slow down. gameStatus () elif n == 2: # The Note-taker chose to perform the second action. This action is to let your camel find an oases. print("You have stopped at an Oases. Your camel is filling your canteen and eating figs. ") drinks = 6 # Put six more drinks in the canteen. gocommands = 4 # Reset the commands. gameStatus () n = 4 # Force the Note-taker to do the last action. elif n == 3: # Oops! The Note-taker chose the third action. This action gets you caught by a hidden crazy kidnapper. print("YOU HAVE BEEN CAPTURED BY some HIDDEN CRAZY KIDNAPPERS. ") print("Luckily the local council has agreed to their ransom-demands...") print("You have a new set of commands. ") print("#9 attempt an escape, or #0 wait for payment.") subInput = int(input("Your sub-command? ")) if subInput == 9: # The number seven was pressed. # Attempt an escape. n = randint(1, 2) # One of two things can happen. if n == 1: # You made it. print("CONGRATULATIONS! YOU SUCCESSFULLY ESCAPED! ") else: # Well, you didn't make it. print("You were mortally wounded by a gunshot wound while trying to escape. ") gameLost = True queryReplay () elif subInput == 0: # The number eight was pressed. print("Your ransom has been payed and you are free to go. The local council is collecting. ") print("Just Wait ") sleep(7) # Stop for ten seconds. you += randint(1, 3) # Move from one to three miles. # The kidnapper slowed you down. elif n == 4: # Your camel is burning across the desert sands. you += randint(6, 20) # Randomly move from one to twenty miles. print("Your camel is burning across the desert sands. ") days -= 3 # Subtract three days from your camel. gameStatus () # You should never get here unless you press number 4. elif mainInput == 4: # let the camel rest. print("Your camel thanks you. ") days = 7 # You now have seven good days left. gocommands -= 1 # Lose one more command. gameStatus () elif mainInput == 5: # Status Check Please? print("Your camel has {0} good days left. You have {1} drinks left in the canteen. You can go {2} commands without drinking.".format(days, drinks, gocommands)) elif mainInput == 6: # HELP! n = randint(1, 2) # Chose whether to give out help or not. if n == 1: # Give Help. print("Help has found you in a state of unconsciousness. ") # Let the camel rest for a while. days = 7 # Your camel is rejubinated. drinks = 3 # You get the half-quart of water. # You drink some water and get more commands. gocommands = 8 # You now have eight commands without drinking. gameStatus () else: # Help hasn't found you. print ("You waited, and waited... and waited... but no help arrived.") gameLost = True queryReplay () elif mainInput == 7: # Exit exitQuery = input ("Are you sure you want to exit? Press Y or N.") exitQuery = exitQuery.upper () while exitQuery != "Y" and exitQuery != "N": print ("Please enter either Y or N.") exitQuery = input ("Are you sure you want to exit? 
Press Y or N.") exitQuery = exitQuery.upper () if exitQuery == "Y": exit () else: print ("Okay.") elif mainInput == 8: # request program help print("The commands you can choose from are: ") print("1 -- drink from your canteen ") print("2 -- move ahead moderate speed ") print("3 -- move ahead fast ]speed ") print("4 -- stop for a rest ") print("5 -- status check ") print("6 -- hope for help ") print ("7 - exit") print ("8 - get program help and list commands.") else: # Invalid option. print("Invalid Option. ") print("The commands you can choose from are:") print("1 -- drink from your canteen ") print("2 -- move ahead moderate speed ") print("3 -- move ahead fast speed ") print("4 -- stop for a rest ") print("5 -- status check ") print("6 -- hope for help ") print ("7 -- exit") print ("8 -- get program help and list commands.") if __name__ == "__main__": main () # End of program.
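The command prompt in main() wraps int(input(...)) in a retry loop. Isolated from the game, that pattern looks like the sketch below; the prompt text and the range check are illustrative only.

# Sketch of the validated numeric-input loop used by the game's command prompt.
def ask_int(prompt: str, valid: range) -> int:
    """Keep asking until the user types an integer inside `valid`."""
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            print("Make sure you only enter a number.")
            continue
        if value in valid:
            return value
        print("That number is not one of the available commands.")

# Example: commands 1-8, as listed in the game's help text.
# command = ask_int("Your command? ", range(1, 9))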
Repository: njsch/camel · Path: camel.py · Language: python (en) · Size: 13,805 · Stars: 0 · Dataset: github-code

github-code record 1824122729
from django.http import HttpResponse, JsonResponse from django.shortcuts import render, redirect from book.models import BookInfo # Create your views here. ################################# Request ####################################################################################################################################### def create_book(request): book = BookInfo.objects.create( name='abc', pub_date='2022-5-3', readcount=10 ) return HttpResponse('create') def shop(request, city_id, shop_id): print(city_id,shop_id) query_params = request.GET print(query_params) # order = query_params['order'] # order = query_params.get('oder') # <QueryDict: {'order': ['readcount'], 'page': ['1']}> # QueryDict 具有字典的特性 # 还具有 一键多值 # # <QueryDict: {'order': ['readcount', 'commentcount'], 'page': ['1']}> order = query_params.getlist('order') print(order) return HttpResponse('python_django学习') def register(request): data = request.POST print(data) # < QueryDict: {'username': ['xixi'], 'password': ['123']} > return HttpResponse('Register') def json(request): body = request.body # print(body) # b'{\n\t"name":"xixi",\n\t"age": 28\n}' body_str = body.decode() # print(body_str) """ { "name":"xixi", "age": 28 } <class 'str'> """ # print(type(body_str)) # JSON形式的字符串 可以转换为 Python的字典 import json body_dict = json.loads(body_str) print(body_dict) # {'name': 'xixi', 'age': 28} ##############请求头############ # print(request.META) print(request.META['SERVER_PROTOCOL']) return HttpResponse('json') def method(request): print(request.method) return HttpResponse('method') def mobilePhone(request, phone_number): print(phone_number) return HttpResponse('mobilePhone') ################################### Response ################################################# def response(request): # HttpResponse(content=响应体, content_type=响应体数据类型, status=状态码) # response = HttpResponse('res', status=200) # # response['name'] = 'xixi' # # return response # JSON -> dict # dict -> JSON info = { 'name': 'xixi', 'age': 28 } info_list = [ { 'name': 'xixi', 'age': 28 }, { 'name': 'erxi', 'age': 28 } ] # response = JsonResponse(info) response = JsonResponse(info_list, safe=False) # response = JsonResponse(data=info_list, safe=False) # [{"name": "xixi", "age": 28}, {"name": "erxi", "age": 28}] return response # return redirect('http://www.baidu.com') # import json # data=json.dumps(info_list) # # response = HttpResponse(data) # return response # 1xx # 2xx # 200 成功 # 3xx # 4xx 请求有问题 # 404 找不到页面 路由有问题 # 403 禁止访问 权限问题 # 5xx # HTTP status code must be an integer from 100 to 599 ##################### """ 查询字符串 http://ip:port/path/path/?key=value&key1=value1 url 以 ? 
为分割 分为2部分 ?前边为 请求路径 ?后边为 查询字符串 查询字符串 类似于字典 key=value 多个数据采用&拼接 """ ########################### cookie和session ############################################################################## """ 第一次请求,携带 查询字符串 http://127.0.0.1:8000/set_cookie/?username=zhangsan&password=123 服务器接收到请求之后,获取username.服务器设置cookie信息,cookie信息包括 username 浏览器接收到服务器的响应之后,应该把cookie保存起来 第二次及其之后的请求,我们访问http://127.0.0.1:8000 都会携带cookie信息。 服务器就可以读取cookie信息,来判断用户身份 """ def set_cookie(request): # 设置cookies,服务器response设置cookie # 1.获取查询字符串数据 username = request.GET.get('username') pwd = request.GET.get('pwd') # 2.服务器设置cookie response = HttpResponse('set_cookie') # key,value = '' max_age 过期时间,秒 response.set_cookie('name', username, max_age=3600) # 有效期一小时 response.set_cookie('pwd', pwd) # 临时cookie # 删除cookies response.delete_cookie('pwd') return response def get_cookie(request): # 获取cookies 从request中获取 print(request.COOKIES) # request.COOKIES 是字典数据 name = request.COOKIES.get('name') return HttpResponse(name) ################## session ##################### # session 是保存在服务器端 -- 数据相对安全 # session需要依赖于cookie """ 第一次请求 http://127.0.0.1:8000/set_session/?username=zhangsan 。我们在服务器端设置sesison信息 服务器同时会生成一个sessionid的cookie信息。 浏览器接收到这个信息之后,会把cookie数据保存起来 第二次及其之后的请求 都会携带这个sessionid. 服务器会验证这个sessionid. 验证没有问题会读取相关数据。实现业务逻辑 """ def set_session(request): # 1.模拟 获取用户信息 username = request.GET.get('username') # 2. 设置session信息 user_id = 1 request.session['user_id'] = user_id request.session['username'] = username # 删除session # request.session.clear() 清除 所有 session的value # request.session.clear() # request.session.flush() 清除 所有 session的 key&value # request.session.flush() # del request.session['键'] 清除 session 指定 key 的value # del request.session['48e4r7tydk1z8zs6rbvxk0ox1ti14zh2'] # request.session.set_expiry(10) return HttpResponse('set_session') def get_session(request): # 通过索引key 获取 字典 值,当session不存在/不匹配,异常报错,不推荐 # user_id = request.session['user_id'] # username = request.session['username'] user_id = request.session.get('user_id') username = request.session.get('username') content = '{},{}'.format(user_id,username) return HttpResponse(content) ###############################类视图################################### def login(requset): print(requset.method) if requset.method == 'GET': return HttpResponse('get 请求') else: return HttpResponse('post 请求') """ 类视图定义 类视图的定义 class 类视图名字(View): def get(self,request): return HttpResponse('xxx') def http_method_lower(self,request): return HttpResponse('xxx') 1. 继承自View 2. 类视图中的方法 是采用 http方法小写来区分不同的请求方式 """ from django.views import View class LoginView(View): def get(self, request): return HttpResponse('get 处理逻辑') def post(self, request): return HttpResponse('post 处理逻辑') """ 我的订单、个人中心页面 如果登录用户 可以访问 如果未登录用户 不应该访问,应该跳转到登录页面 定义一个订单、个人中心 类视图 如果定义我有没有登录呢??? 我们以登录 后台站点为例 """ from django.contrib.auth.mixins import LoginRequiredMixin # class OrderView(View): # 只继承View类 # class OrderView(View, LoginRequiredMixin): # 多继承LoginRequiredMixin 和 View类, 多继承有先后顺序 class OrderView(LoginRequiredMixin, View): # 多继承LoginRequiredMixin 和 View类 def get(self, request): # 模拟登录标记 # isLogin = True # if not isLogin: # return HttpResponse('未登录,跳转到登录页面') return HttpResponse('GET 我的订单页面,这个页面必须要登录') def post(self, request): isLogin = True # if not isLogin: # return HttpResponse('未登录,跳转到登录页面') return HttpResponse('GET 我的订单页面,这个页面必须要登录')
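These views still need routes before Django will serve them. The sketch below is a plausible app urls.py wired from the view signatures and the URLs mentioned in the comments; the path strings and converters are assumptions, not the repository's actual configuration.

# Hypothetical book/urls.py for the views above; paths are inferred from the
# view signatures (e.g. shop(request, city_id, shop_id)) and the comments.
from django.urls import path
from book import views

urlpatterns = [
    path('create_book/', views.create_book),
    path('shop/<int:city_id>/<int:shop_id>/', views.shop),
    path('register/', views.register),
    path('json/', views.json),
    path('method/', views.method),
    path('mobile/<int:phone_number>/', views.mobilePhone),
    path('response/', views.response),
    path('set_cookie/', views.set_cookie),
    path('get_cookie/', views.get_cookie),
    path('set_session/', views.set_session),
    path('get_session/', views.get_session),
    path('login/', views.LoginView.as_view()),   # class-based views are mounted via as_view()
    path('order/', views.OrderView.as_view()),
]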
Repository: guoxi-xixi/django_base · Path: bookmanager03/book/views.py · Language: python (zh) · Size: 8,205 · Stars: 0 · Dataset: github-code

github-code record 31788132323
from typing import TYPE_CHECKING, Iterable, List, Optional, Union, overload from ..builtins import types from ..common.error import ConstraintError from ..node import ( ArrayTypeNode, FuncTypeNode, PointerTypeNode, SimpleTypeNode, TypeNode, ) if TYPE_CHECKING: from .block import Expression class ChunkVariable: def __init__( self, name: str, vtype: Optional[TypeNode], chunk: Optional["Chunk"], initial: Optional["Expression"] = None, ): self.name = name self.vtype = vtype self.chunk = chunk self.initial = initial def _typenamestr( self, tp: TypeNode, original: Optional[str], name: Optional[str] ) -> str: original = original or "" name = name or "" if isinstance(tp, SimpleTypeNode): return f"{types.TRANSLATIONS[tp.core]} {name}" elif isinstance(tp, FuncTypeNode): args = ", ".join(self._typenamestr(arg, None, None) for arg in tp.args) if name == original: assert original base = f"{original}({args})" return self._typenamestr(tp.ret, base, base) else: ret = self._typenamestr(tp.ret, None, None) return f"{ret} ({name})({args})" elif isinstance(tp, PointerTypeNode): return self._typenamestr(tp.base, original, f"*{name}") elif isinstance(tp, ArrayTypeNode): return self._typenamestr(tp.base, original, f"{name}[{tp.size}]") else: raise RuntimeError("invalid variable type") def typename(self) -> str: if self.vtype is None: return f"void {self.name}" return self._typenamestr(self.vtype, self.name, self.name) def typestr(self) -> str: if self.vtype is None: return "void" return self._typenamestr(self.vtype, None, None).strip() def _basic_types(self, tp: TypeNode) -> Iterable[str]: if isinstance(tp, SimpleTypeNode): yield tp.core elif isinstance(tp, PointerTypeNode): yield from self._basic_types(tp.base) elif isinstance(tp, ArrayTypeNode): yield from self._basic_types(tp.base) elif isinstance(tp, FuncTypeNode): yield from self._basic_types(tp.ret) for arg in tp.args: yield from self._basic_types(arg) else: raise RuntimeError("invalid variable type") def basic_types(self) -> Iterable[str]: if self.vtype is None: return iter(()) return self._basic_types(self.vtype) def __repr__(self) -> str: return f"<{self.__class__.__name__} {self.typename()}>" class ChunkConstraint: def __init__(self, islocal=False, isglobal=False, static=False): self.islocal = islocal self.isglobal = isglobal self.static = static self._verify() def copy(self) -> "ChunkConstraint": return ChunkConstraint( islocal=self.islocal, isglobal=self.isglobal, static=self.static ) def merge(self, other: "ChunkConstraint"): self.islocal = self.islocal or other.islocal self.isglobal = self.isglobal or other.isglobal self.static = self.static or other.static self._verify() def _verify(self): if self.islocal and self.isglobal: raise ConstraintError("cannot allow local and global constraints") def __repr__(self) -> str: return f"<{self.__class__.__name__} local={self.islocal} global={self.isglobal} static={self.static}>" class Chunk: def __init__( self, variables: List[ChunkVariable], constraint: Optional[ChunkConstraint] = None, ): self.variables = variables self._table = { var.name: i for i, var in enumerate(variables) if not var.name.startswith("_") } self.constraint = ChunkConstraint() if constraint is None else constraint @property def varnames(self) -> List[str]: return [var.name for var in self.variables] def add_variable(self, variable: ChunkVariable): if variable.name in self._table: raise KeyError("variable already exists in chunk") self.variables.append(variable) self._table[variable.name] = len(self.variables) - 1 def rename_variable(self, 
variable: ChunkVariable, name: str): if variable not in self.variables: raise KeyError("variable not in chunk") idx = self._table[variable.name] self._table.pop(variable.name) variable.name = name self._table[variable.name] = idx def remove_variable(self, variable: ChunkVariable): if variable.name not in self._table: raise KeyError("variable not in chunk table") idx = self._table[variable.name] target = self.variables[idx] if target is not variable: raise KeyError("variable does not match") self.variables.remove(target) self._table.pop(target.name) def lookup(self, name: str) -> Optional[ChunkVariable]: i = self._table.get(name) if i is None: return None else: return self.variables[i] def __contains__(self, var: Union[str, ChunkVariable]) -> bool: if isinstance(var, str): return var in self._table else: return var in self.variables def __repr__(self) -> str: names = ", ".join(var.name for var in self.variables) return f"<{self.__class__.__name__} {names}>" @overload def merge_chunks(first: Optional[Chunk], second: Chunk) -> Chunk: ... @overload def merge_chunks(first: Chunk, second: Optional[Chunk]) -> Chunk: ... def merge_chunks(first: Optional[Chunk], second: Optional[Chunk]) -> Chunk: if first is None: assert second is not None return second if second is None: assert first is not None return first constraint = first.constraint.copy() constraint.merge(second.constraint) return Chunk([*first.variables, *second.variables], constraint)
Repository: jedevc/fyp · Path: vulnspec/graph/chunk.py · Language: python (en) · Size: 6,242 · Stars: 0 · Dataset: github-code

github-code record 21275819456
"""Defines all necessary networks for training / evaluation """ from typing import Optional, Tuple import mindspore.nn as nn from mindspore import Tensor from .backbones import Backbone from .decoders import Decoder from .heads import Head from .loss import Loss from .necks import Neck class Net(nn.Cell): """Create network for foward and backward propagate. Args: backbone: Model backbone head: Model head neck: Model neck. Default: None Inputs: | x: Tensor Outputs: | result: Tensor """ def __init__( self, backbone: Backbone, head: Head, neck: Optional[Neck] = None ) -> None: super().__init__() self.backbone = backbone self.head = head self.neck = neck self.has_neck = self.neck is not None def construct(self, x: Tensor) -> Tensor: x = self.backbone(x) if self.has_neck: x = self.neck(x) x = self.head(x) return x class EvalNet(nn.Cell): """Create network for forward propagate and decoding only. Args: net: Network used for foward and backward propagate decoder: Decoder output_raw: Return extra net's ouput. Default: True Inputs: | inputs: List of tensors Outputs | result: Decoded result | raw_result (optional): Raw result if output_raw is true """ def __init__(self, net: Net, decoder: Decoder, output_raw: bool = True) -> None: super().__init__() self.net = net self.decoder = decoder self.output_raw = output_raw self.net.set_train(False) self.decoder.set_train(False) def construct(self, *inputs: Tensor) -> Tuple[Tensor, ...]: x = self.net(inputs[0]) result = self.decoder(x, *inputs[1:]) if self.output_raw: return result, x return result class NetWithLoss(nn.Cell): """Create network with loss. Args: net: Network used for foward and backward propagate loss: Loss cell has_extra_inputs: Has Extra inputs in the loss calculation. Default: False Inputs: | data: Tensor feed into network | label: Tensor of label | extra_inputs: List of extra tensors used in loss calculation Outputs: | loss: Loss value """ def __init__(self, net: Net, loss: Loss, has_extra_inputs: bool = False) -> None: super().__init__() self.net = net self.loss = loss self.has_extra_inputs = has_extra_inputs def construct(self, data: Tensor, label: Tensor, *extra_inputs: Tensor) -> Tensor: out = self.net(data) if self.has_extra_inputs: return self.loss(out, label, *extra_inputs) return self.loss(out, label)
Repository: mindspore-lab/mindpose · Path: mindpose/models/networks.py · Language: python (en) · Size: 2,807 · Stars: 15 · Dataset: github-code

github-code record 10678150202
import asyncio
from typing import List, Any, Set, Dict

import orjson
import websockets
from websockets import WebSocketServerProtocol

from blockchain import Blockchain
from block import Block
from transaction import Transaction
from utils import send, handle


class WsNode:
    def __init__(self, domain: str):
        self.domain: str = domain
        self.nodes: Set[str] = set()
        self.connects: Dict[str, WebSocketServerProtocol] = dict()
        self.blockchain: Blockchain = Blockchain()
        self.mem_pool: Set[Transaction] = set()

    async def serve(self, node: str):
        ws = self.connects[node]
        while True:
            try:
                await self.handle(ws, orjson.loads(await ws.recv()))
            except websockets.ConnectionClosed:
                self.nodes.remove(node)
                self.connects.pop(node)
                break

    async def handle(self, ws, message):
        switcher = {
            'blockchain_len': self.handle_blockchain_len,
            'blockchain': self.handle_blockchain,
            'hashes': self.handle_hashes,
        }
        await handle(switcher, ws, message)

    async def broadcast(self, _type: str, data: Any = None, nodes: List[str] = None) -> None:
        sockets = self.connects.values() if nodes is None else [self.connects[node] for node in nodes]
        await asyncio.gather(*[send(ws, _type, data) for ws in sockets])

    async def connect_nodes(self, nodes: List[str]):
        olds = [self.domain] + self.node_list
        news = []
        for node in filter(lambda x: x not in olds, nodes):
            news.append(node)
            websocket = await websockets.connect(f'ws://{node}')
            asyncio.get_event_loop().create_task(self.serve(node))
            self.nodes.add(node)
            self.connects[node] = websocket
        inputs = [(node, olds + news) for node in news] + [(node, news) for node in olds]
        if len(news) > 1 or (len(news) > 0 and self.domain not in news):
            await asyncio.gather(*[self.share_nodes(*args) for args in inputs])
        await self.pull_longest_chain(news)

    async def share_nodes(self, node: str, nodes: List[str]):
        print('share', nodes, 'to', node)
        if node != self.domain:
            ws = self.connects[node]
            await send(ws, 'connect_nodes', {'nodes': nodes})

    async def share_block(self, block: Block):
        await self.broadcast('add_block', {'block': block.dict()})

    async def pull_longest_chain(self, nodes: List[str] = None):
        await self.broadcast('get_blockchain_len', nodes=nodes)

    async def add_transaction(self, transaction: Transaction):
        if transaction in self.mem_pool:
            return
        self.mem_pool.add(transaction)
        await self.broadcast('add_transaction', {'transaction': transaction.dict()})

    @property
    def blockchain_len(self) -> int:
        return len(self.blockchain)

    @property
    def node_list(self) -> List[str]:
        return list(self.nodes)

    @property
    def mem_pool_list(self) -> List[Transaction]:
        return list(self.mem_pool)

    async def handle_blockchain_len(self, length: int) -> str:
        if length > self.blockchain_len:
            return 'get_blockchain_hashes'

    async def handle_hashes(self, hashes: List[str]):
        start = 0
        for i, (a, b) in enumerate(zip(hashes, self.blockchain.hashes)):
            if a != b:
                start = i
                break
        return 'get_blockchain', {'start': start}

    async def handle_blockchain(self, chain):
        if chain[-1]['id'] > self.blockchain_len:
            self.blockchain.blocks[chain[0]['id']:] = [Block.parse_obj(block_data['block']) for block_data in chain]
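The module above only defines the node; the server entry point that accepts incoming connections is not shown. A hedged sketch of one possible wiring is given below; the domain, port and dispatch loop are assumptions rather than the project's actual setup.

# Hedged sketch of a server entry point that feeds incoming messages into WsNode.handle.
import asyncio
import orjson
import websockets

from ws_node import WsNode

node = WsNode(domain="localhost:8765")           # hypothetical domain for this node

async def handler(ws, path=None):
    # Older websockets versions pass (ws, path); newer ones pass only the connection.
    async for raw in ws:
        await node.handle(ws, orjson.loads(raw))

async def main():
    async with websockets.serve(handler, "localhost", 8765):
        await asyncio.Future()                   # run until cancelled

if __name__ == "__main__":
    asyncio.run(main())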
Repository: XmasApple/simple_blockchain · Path: ws_node.py · Language: python (en) · Size: 3,756 · Stars: 0 · Dataset: github-code

github-code record 16563057746
import numpy as np import time, cv2, copy, os, random, sys # Check if Running On Pi import io import os def is_raspberrypi(): try: with io.open('/sys/firmware/devicetree/base/model', 'r') as m: if 'raspberry pi' in m.read().lower(): return True except Exception: pass return False from matplotlib import pyplot as plt import matplotlib.image as mpimg class image_processor: def __init__(self, pixelValues, displayDim, image_folder): #displayName = 'generic display' # Final Image Dimensions and Colors self.dispWidth = displayDim[0] self.dispHeight = displayDim[1] self.pixelColors = pixelValues self.image_folder = image_folder #print('processor extablished for ' + displayName + ' dimension: ' + str(self.displayWidth) + 'x' + str(self.displayHeight) + ' pixel values: ' + pixelValues) def newImage(self, image_title): self.imgTitle = str(sys.path[0])+ '\DispPics' + str(image_title) print("imported Image Title = " + self.imgTitle + " ----- of type " + str(type(self.imgTitle))) def getImageTitle(self): return self.imgTitle def __displayRGB(self): r = self.__imageResizeRGB() plt.imshow(r) plt.show() # split self off def __imageResizeRGB(self): img = cv2.imread(self.imgTitle) resized = cv2.resize(img, (self.dispWidth, self.dispHeight), interpolation = cv2.INTER_AREA) return resized def __displayBW(self): r = self._imageResizeBW() plt.imshow(r, cmap = "gray") plt.show() # split self off def __imageResizeBW(self): img = cv2.imread(self.imgTitle) imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) resized = cv2.resize(imgGray, (self.dispWidth, self.dispHeight), interpolation = cv2.INTER_AREA) return resized def __reduceColors(self, img, K): n = img[0][0].size Z = img.reshape((-1,n)) # convert to np.float32 Z = np.float32(Z) # define criteria, number of clusters(K) and apply kmeans() criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS) # Now convert back into uint8, and make original image center = np.uint8(center) res = center[label.flatten()] res2 = res.reshape((img.shape)) return res2 def __removeColors(self, img): recorded = np.unique(img) imgCopy = copy.deepcopy(img) for y in range(0,len(img)): for x in range(0,len(img[0])): for n in range(0,len(recorded)): if imgCopy[y][x] == recorded[n]: imgCopy[y][x] = n return imgCopy def defaultConverter(self, imgTit = False, k = 4): if imgTit is False: self.getRandomImage() else: self.newImage(imgTit) bw = self.__imageResizeBW() lowRes = self.__reduceColors(bw, k) remapped = self.__removeColors(lowRes) return remapped # Fucking Hell getRandomImage not working consistently def getRandomImage(self): #Compensate if is real raspberry pi n=0 random.seed() print("penis") print(str(sys.path[0]) + self.image_folder) print("penis") for root, dirs, files in os.walk(str(sys.path[0]) + self.image_folder): print("penis") for name in files: n += 1 if random.uniform(0, n) < 1: print("got rfile") rfile = os.path.join(root, name) else: print("rfile not selected") print(rfile) self.imgTitle = rfile if __name__ == '__main__': dispDim = (16, 16) directory = "/DispPics" ip = image_processor(('#CD853F','#8B5A2B','#008080','#D8BFD8'), dispDim, directory) print(ip.defaultConverter(k = 3)) i = 1 while True: time.sleep(1) i += 1
Repository: Rolling-Blocks/RB-CODE-Prototype-1 · Path: image_processor.py · Language: python (en) · Size: 4,098 · Stars: 1 · Dataset: github-code

github-code record 72486084988
n = int(input("Enter the value of n: "))
for i in range(11, n+1):
    # Divisible by both 3 & 7
    if i % 21 == 0:
        print("TipsyTopsy")
    elif i % 7 == 0:
        print("Topsy")
    elif i % 3 == 0:
        print("Tipsy")
    else:
        print(i)
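The same rule reads a little more clearly as a pure function with a few spot checks; this rewrite is illustrative and not part of the original exercise.

# Illustrative rewrite of the same rule as a pure function, easy to spot-check.
def tipsy_topsy(i: int) -> str:
    if i % 21 == 0:          # divisible by both 3 and 7
        return "TipsyTopsy"
    if i % 7 == 0:
        return "Topsy"
    if i % 3 == 0:
        return "Tipsy"
    return str(i)

assert tipsy_topsy(21) == "TipsyTopsy"
assert tipsy_topsy(14) == "Topsy"
assert tipsy_topsy(12) == "Tipsy"
assert tipsy_topsy(11) == "11"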
Repository: arnab7070/BeyondCoding · Path: Python Programs/AOT IT Workshop/Final Lab Exam Revison/question14.py · Language: python (en) · Size: 260 · Stars: 1 · Dataset: github-code

github-code record 31236652781
from youtube3.youtube import *
import os   # explicit imports; the original relied on the wildcard import above to provide these
import sys
import json
from oauth2client.tools import argparser
import re


def process_videos(workDir='.', inputFile='liked.json', recommendedFile='recommended.json',
                   excludedFile='excluded.json', postponedFile='postponed.json', maxCount=5):
    recommended, excluded, postponed, liked = {}, {}, {}, {}
    workDir, inputFile, recommendedFile, excludedFile, postponedFile = workDir or '.', inputFile or 'liked.json', \
        recommendedFile or 'recommended.json', excludedFile or 'excluded.json', postponedFile or 'postponed.json'
    liked = load_definition(liked, inputFile, workDir)
    recommended = load_definition(recommended, recommendedFile, workDir)
    excluded = load_definition(excluded, excludedFile, workDir)
    postponed = load_definition(postponed, postponedFile, args.workDir)
    start = int(args.start) if args.start else 0
    end = min(int(args.end), len(liked)) if args.end else len(liked)
    youtube = Youtube(get_authenticated_service(args))
    likedList = list(liked.items())[start:end]
    for videoId, title in likedList:
        print("Now processing %s, %s" % (videoId, title))
        for relatedvideos in youtube.iterate_related_videos(videoId, maxCount):
            for item in relatedvideos['items']:
                rvideoId, rtitle = item['id']['videoId'], item['snippet']['title']
                if rvideoId not in liked and rvideoId not in excluded and rvideoId not in postponed:
                    if rvideoId not in recommended:
                        recommended[rvideoId] = {"title": rtitle, "count": 1}
                    else:
                        recommended[rvideoId]["count"] += 1
    recommendedSorted = sorted(recommended.items(), key=lambda x: x[1]["count"], reverse=True)
    return recommendedSorted


def load_definition(records, inputFile, workDir):
    inputFileC = workDir + '/' + inputFile
    if os.path.isfile(inputFileC):
        with open(inputFileC, 'r', encoding="utf-8") as f:
            records = dict(json.load(f))
    else:
        print("Cannot find file {}".format(inputFileC))
    return records


def tokenize_lists(recommended, liked, workDir, ignore_words_file):
    def get_tokenized(str, ignored_words):
        str = str.lower()
        str = re.sub(r"\(.*\)", "", str)
        str = re.sub(r"[0-9]+", "", str)
        strtok = re.split(r'[\[\s\-\(\)\"\\\/\|\!\&\,\.\+]', str)
        strl = [s for s in strtok if s not in ignored_words and len(s) > 0]
        return strl

    ignored_words = []
    if os.path.isfile(workDir + '/' + ignore_words_file):
        with open(workDir + '/' + ignore_words_file, 'r', encoding="utf-8") as f:
            ignored_words = f.read().splitlines()
    ignored_words = [i.lower() for i in ignored_words]
    tok_liked = {k: get_tokenized(v, ignored_words) for k, v in liked.items()}
    tok_liked_list = [get_tokenized(v, ignored_words) for k, v in liked.items()]
    # print(tok_liked_list)
    tok_recommended = {k: {"title": get_tokenized(v["title"], ignored_words), "count": v["count"]}
                       for k, v in recommended.items()}
    tok_duplicates = {k: {"title": v["title"], "count": v["count"]}
                      for k, v in tok_recommended.items() if v["title"] in tok_liked_list}
    tok_no_duplicates = {k: {"title": v["title"], "count": v["count"]}
                         for k, v in tok_recommended.items() if v["title"] not in tok_liked_list}
    return tok_duplicates, tok_no_duplicates


def save_recommended(workDir='.', recommendedFile='recommended.json', recommendedSorted={}):
    workDir, recommendedFile, recommendedSorted = workDir or '.', \
        recommendedFile or 'recommended.json', recommendedSorted or {}
    save_to_json(recommendedFile, recommendedSorted, workDir)


def save_to_json(outputFile, outputData, workDir):
    with open(workDir + '/' + outputFile, 'w', encoding="utf-8") as f:
        json.dump(outputData, f, ensure_ascii=False)
    print("Saved file: {}".format(workDir + '/' + outputFile))


def retrieve_recommended(args):
    recommendedSorted = process_videos(workDir=args.workDir, inputFile=args.inputFile,
                                       recommendedFile=args.recommendedFile, excludedFile=args.excludedFile,
                                       postponedFile=args.postponedFile, maxCount=args.maxCount)
    save_recommended(workDir=args.workDir, recommendedFile=args.recommendedFile,
                     recommendedSorted=recommendedSorted)
    return recommendedSorted


def eliminate_duplicates(args):
    liked, recommended = {}, {}
    liked = load_definition(liked, args.inputFile, args.workDir)
    recommended = load_definition(recommended, args.recommendedFile or 'recommended.json', args.workDir)
    duplicates, no_duplicates = tokenize_lists(recommended=recommended, liked=liked,
                                               workDir=args.workDir, ignore_words_file='ignore_words.txt')
    save_to_json(outputData=list([[k, v] for k, v in duplicates.items()]),
                 outputFile='duplicates.json', workDir=args.workDir)
    save_to_json(outputData=list([[k, v] for k, v in no_duplicates.items()]),
                 outputFile='recommended_no_dup.json', workDir=args.workDir)


if __name__ == "__main__":
    argparser.add_argument('--workDir')
    argparser.add_argument('--maxCount')
    argparser.add_argument('--inputFile')
    argparser.add_argument('--start')
    argparser.add_argument('--end')
    argparser.add_argument('--recommendedFile')
    argparser.add_argument('--excludedFile')
    argparser.add_argument('--postponedFile')
    args = argparser.parse_args()
    if (args.workDir is None):
        print("Usage : python recommend_videos.py --workdDir <workDir> --maxCount <maxCount> --inputFile <file>")
        sys.exit(0)
    if not os.path.isdir(args.workDir):
        print("{} does not exist -- exiting".format(args.workDir))
        sys.exit(0)
    retrieve_recommended(args)
    eliminate_duplicates(args)
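The counting logic at the heart of process_videos() can be checked without touching the YouTube API. The sketch below isolates it with made-up data; the helper name and the toy inputs are illustrative.

# The heart of process_videos(), isolated: count related videos that are not
# already liked, excluded or postponed. Data below is made up for illustration.
def count_recommendations(related_by_video, liked, excluded, postponed):
    recommended = {}
    for related in related_by_video.values():
        for rvideo_id, rtitle in related:
            if rvideo_id in liked or rvideo_id in excluded or rvideo_id in postponed:
                continue
            entry = recommended.setdefault(rvideo_id, {"title": rtitle, "count": 0})
            entry["count"] += 1
    return sorted(recommended.items(), key=lambda x: x[1]["count"], reverse=True)

# Example with toy data:
print(count_recommendations(
    related_by_video={"a": [("x", "Video X"), ("y", "Video Y")], "b": [("x", "Video X")]},
    liked={"a": "Liked A", "b": "Liked B"}, excluded={}, postponed={},
))  # [('x', {'title': 'Video X', 'count': 2}), ('y', {'title': 'Video Y', 'count': 1})]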
Repository: diegoami/DA-youtube-scripts · Path: youtube-scripts/recommend_videos.py · Language: python (en) · Size: 6,021 · Stars: 0 · Dataset: github-code

github-code record 14175992166
import math import random import copy import numpy import numpy as np file = open("australian.dat", "r") l = [] for line in file: l.append(line.split()) wynik = [] for i in l: wynik.append(list(map(lambda e: float(e), i))) mojalista = wynik def MetrykaEuklidesowa(listaA, listaB): tmp = 0 for i in range(len(listaA)-1): tmp += (listaA[i] - listaB[i])**2 return math.sqrt(tmp) def zadanie1(lista): slownik = {} for i in lista[1:]: if i[14] not in slownik.keys(): slownik[i[14]] = [MetrykaEuklidesowa(lista[0], i)] else: slownik[i[14]].append(MetrykaEuklidesowa(lista[0], i)) return slownik # print(MetrykaEuklidesowa(mojalista[0], mojalista[3])) # print(zadanie1(mojalista)[1.0]) m = [[1,2,3], [3,4,5], [2,4,5]] def wskaznik(macierz, wynik=0): indeksy = list(range(len(macierz))) if len(macierz) == 2 and len(macierz[0]) == 2: wartosc = macierz[0][0] * macierz[1][1] - macierz[1][0] * macierz[0][1] return wartosc for fc in indeksy: macierz_kopia = macierz.copy() macierz_kopia = macierz_kopia[1:] wysokosc = len(macierz_kopia) for i in range(wysokosc): macierz_kopia[i] = macierz_kopia[i][0:fc] + macierz_kopia[i][fc + 1:] znak = (-1) ** (fc % 2) pod_wskaznik = wskaznik(macierz_kopia) wynik += znak * macierz[0][fc] * pod_wskaznik return wynik # print(wskaznik(m)) def MetrykaEuklidesowaInaczej(listaA, listaB): tmp = sum((elem1-elem2)**2 for elem1, elem2 in zip(listaA, listaB)) return math.sqrt(tmp) def odlegosciOdx(lista, x): wynik = [] for i in lista: para = (i[-1], (MetrykaEuklidesowa(x, i))) wynik.append(para) return wynik def segregacjaOdleglosci(lista): slownik = {} for i in lista: if i[0] not in slownik.keys(): slownik[i[0]] = [i[1]] else: slownik[i[0]].append(i[1]) return slownik def sumowanieOdleglosci(lista, k): slownik = {} for i in lista.keys(): tmp_list = lista[i] tmp_list.sort() slownik[i] = sum(tmp_list[0:k]) return slownik def getList(dict): list = [] for key in dict.keys(): list.append(key) return list def decyzja(lista): min = lista[0.0] dec = 0 for i in getList(lista)[1:]: if lista[i] == min: return None if lista[i] < min: min = lista[i] dec = i return dec def MetrykaEuklidesowa2(listaA, listaB, czyOstatni=True): tmp = 0 if czyOstatni: listaA=listaA[:-1] listaB=listaB[:-1] v1 = np.array(listaA) v2 = np.array(listaB) c = v1 - v2 tmp =np.dot(c,c) return math.sqrt(tmp) def decyzja2(lista, x, k): odleglosc = odlegosciOdx(lista, x) slownik = segregacjaOdleglosci(odleglosc) sumaodleglosci = sumowanieOdleglosci(slownik, k) buff_lista = [(k, v) for k, v in sumaodleglosci.items()] min = buff_lista[0][1] dec = 0 for para in buff_lista[1:]: if para[1] == min: return None if para[1] < min: min = para[1] dec = para[0] return dec argx = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] print(decyzja2(mojalista, argx, 5)) # print(MetrykaEuklidesowa(mojalista[0], mojalista[3])) # print(MetrykaEuklidesowaInaczej(mojalista[0], mojalista[3])) # print(segregacjaOdleglosci(odlegosciOdx(mojalista, argx))) # print(sumowanieOdleglosci(segregacjaOdleglosci(odlegosciOdx(mojalista, argx)), 5)) # print(decyzja(sumowanieOdleglosci(segregacjaOdleglosci(odlegosciOdx(mojalista, argx)), 5))) # print("------------------------------------------") # print(MetrykaEuklidesowa(mojalista[0], mojalista[3])) # print(MetrykaEuklidesowa2(mojalista[0], mojalista[3])) # slow = {0.0: 17.9, 1.0: 1.2, 3.0: 1.2} # print(decyzja(slow)) def calki_monte_carlo(f, a, b, n): result = 0 for i in range(n): result += f(random.uniform(a, b)) return (result / n) * (b - a) #print(calki_monte_carlo(lambda x: x**2, 0, 1, 5000)) def calki_kwadraty(f, a, b, 
n): step = (b - a) / n result = 0 for i in range(n): result += f(a + i * step) * step return result #print(calki_kwadraty(lambda x: x**2, 0, 1, 5000)) def segregacjaKolorowan(lista): slownik = {} for i in lista: if i[-1] not in slownik.keys(): slownik[i[-1]] = [i[0:-1]] else: slownik[i[-1]].append(i[0:-1]) return slownik def losoweKolorowanie(lista, iloscTypowZachowan): # losowe kolorowanie for i in lista: i[-1] = float(random.randint(0, iloscTypowZachowan - 1)) return lista def kMeans2(lista): buff_lista = copy.deepcopy(lista) slownikWynik = {} slownik = segregacjaKolorowan(buff_lista) punktyCiezkosci = {} for klasa in slownik: minimalna = float(math.inf) for element in slownik[klasa]: sumaOdleglosci = 0 for i in range(len(slownik[klasa])): sumaOdleglosci += MetrykaEuklidesowa2(element, slownik[klasa][i]) sredniaOdleglosci = sumaOdleglosci / len(slownik[klasa]) if sredniaOdleglosci < minimalna: punktyCiezkosci[klasa] = (element, sredniaOdleglosci) minimalna = sredniaOdleglosci for klasa in slownik: for element in slownik[klasa]: minimalna = float(math.inf) punkt = () for klasaCiezkosci in punktyCiezkosci: odlegloscDoPunktuCiezkosci = MetrykaEuklidesowa2(element, punktyCiezkosci[klasaCiezkosci][0]) if odlegloscDoPunktuCiezkosci < minimalna: punkt = punktyCiezkosci[klasaCiezkosci] minimalna = odlegloscDoPunktuCiezkosci for klasaCiezkosci in punktyCiezkosci: if punkt == punktyCiezkosci[klasaCiezkosci]: if klasaCiezkosci not in slownikWynik.keys(): slownikWynik[klasaCiezkosci] = [element] else: slownikWynik[klasaCiezkosci].append(element) listaWynik = [] for klasa in slownikWynik: for element in slownikWynik[klasa]: element.append(klasa) listaWynik.append(element) if listaWynik == buff_lista: return listaWynik else: return kMeans2(listaWynik) # print(mojalista) # kopia_mojalista = copy.deepcopy(mojalista) # listaPoKmeans = kMeans2(losoweKolorowanie(kopia_mojalista, 2)) # mojalista.sort() # print(mojalista) # listaPoKmeans.sort() # print(listaPoKmeans) def sredniaArytmetyczna(listaA, czyOstatni=True): if czyOstatni: listaA=listaA[:-1] v1 = np.array(listaA) ilosc = len(v1) srednia = sum(v1)/ilosc return srednia def sredniaArytmetycznaWektorowo(listaA, wektorJedynek): v1 = np.array(listaA) tmp = np.dot(v1, wektorJedynek) srednia = tmp/len(v1) return srednia c = [1, 1, 1, 1] print(sredniaArytmetycznaWektorowo([1,2,5,6], c)) #print(sredniaArytmetyczna([1,2,3,4,5],False)) def wariancja(listaA, czyOstatni=True): srednia = sredniaArytmetyczna(listaA, czyOstatni) if czyOstatni: listaA=listaA[:-1] v1 = np.array(listaA) sum = 0 for i in v1: sum += (i - srednia)**2 war = sum/len(v1) return war def wariancjaWektorowo(listaA, c): sr = sredniaArytmetycznaWektorowo(listaA, c) v1 = np.array(listaA) vectorOnes = np.ones(len(listaA)) v2 = v1 - sr * vectorOnes c = np.dot(v2, v2) return c / len(listaA) # print(wariancjaWektorowo([1,2,5,6],c)) def odchylenieStandardowe(listaA, czyOstatni=True): war = wariancja(listaA, czyOstatni) return math.sqrt(war) # print(odchylenieStandardowe([7, 4, -2], False)) def sredniaWektorow(lista, czyOstatni=True): lista_wynik = [] if czyOstatni: for elem in lista: elem = elem[:-1] lista_wynik.append(elem) else: lista_wynik = copy.deepcopy(lista) return [sum(x) / len(x) for x in zip(*lista_wynik)] #print(sredniaWektorow([[1, 2, 3], [1, 2, 3], [6, 9, 4], [4, 6, 1]], True)) def wariancjaWektorow(lista, czyOstatni=True): srednia = sredniaWektorow(lista, czyOstatni) lista_buff = [] if czyOstatni: for elem in lista: elem = elem[:-1] lista_buff.append(elem) else: lista_buff = 
copy.deepcopy(lista) return [ sum([(x - srednia[i]) ** 2 for i, x in enumerate(elem)]) / len(elem) for elem in lista_buff ] # print(wariancjaWektorow([[1, 2, 3], [2, 4, 3], [6, 9, 4], [5, 1, 4]], True)) def odchylenieStandardoweWektorow(lista, czyOstatni=True): return [math.sqrt(x) for x in wariancjaWektorow(lista, czyOstatni)] #print(odchylenieStandardoweWektorow([[1, 2, 3], [2, 4, 3], [6, 9, 4]], False)) #(2,1) #(5,2) #(7,3) #(8,3) # Wynik beta0 =2/7 beta1=5/14 def regersjaLiniowa(list): x = np.array([i[0] for i in list]) x_transposed = np.array([ np.ones(len(x)), x ]) x = np.transpose(x_transposed) y = np.transpose(np.array([i[1] for i in list])) x_t = np.linalg.inv(np.dot(x_transposed, x)) r = np.dot(x_t, x_transposed) r = np.dot(r, y) return r list = [[2, 1], [5, 2], [7, 3], [8, 3]] A = np.array([[1, 0, 2], [2, 1, 0], [0, 3, 1]]) A1 = np.array([[1, 0, 1, 0, 1], [1, 1, 0, 1, 0], [0, 1, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 0, 1, 1]]) def funkcajaRzA(list, Q): return np.dot(np.transpose(Q), list) def funkcjaProj(vector_v, vecor_u): L = numpy.dot(vector_v, vecor_u) M = numpy.dot(vecor_u, vecor_u) projekcja = (L/M) * vecor_u return projekcja def funkcajaQzA(list): dlugosc_u1 = math.sqrt(numpy.dot(list[0:,0],list[0:,0])) e1 = (1/dlugosc_u1) * list[0:,0] Q = np.array([e1]) U = np.array([list[0:,0]]) U = np.transpose(U) for i in range(0, np.shape(A)[1]-1): proj_buff = 0 for y in range(i+1): p = funkcjaProj(list[0:, i+1], U[0:, y]) proj_buff += p u = list[0:, i+1] - proj_buff U = np.transpose(U) U = numpy.append(U, [u], axis=0) U = np.transpose(U) dlugosc_u = math.sqrt(numpy.dot(u, u)) if dlugosc_u == 0: e = u else: e = (1/dlugosc_u) * u Q = numpy.append(Q, [e], axis=0) return np.transpose(Q) print("------------------------------------") Q = funkcajaQzA(A) Q = np.matrix.round(Q, 3) print("Macierz Q z A:") print(Q) print("---------------------------") R = funkcajaRzA(A, funkcajaQzA(A)) R = np.matrix.round(R, 3) print("Macierz R z A:") print(R) def A_nastepna(A): Q = funkcajaQzA(A) return np.dot(np.dot(np.transpose(Q), A), Q) def czyMacierzGornoTrojkatna(list): rozmiar = np.shape(list)[1] if (np.diag(list)-np.transpose(np.dot(list, np.ones((rozmiar, 1))))).all() > 0.00001: return True else: return False def wartosciWlasne(list): buff_A = copy.deepcopy(list) while czyMacierzGornoTrojkatna(buff_A): buff_A = A_nastepna(buff_A) return np.diag(buff_A) A2 = np.array([[1, 2, 3], [4, 1., 5], [7, 5., 1]]) test = np.array([[5, 2, 4], [2, 4, 0], [4, 0, 4]]) print("Wartości własne A2:") print(wartosciWlasne(A2)) def gaussJordan(list): rozmiar = np.shape(list)[0] wektor = [] for i in range(rozmiar): if list[i][i] == 0.0: return "Wykryto zero!" 
for j in range(rozmiar): if i != j: ratio = list[j][i] / list[i][i] for k in range(rozmiar + 1): list[j][k] = list[j][k] - ratio * list[i][k] for x in range(rozmiar): wektor.append(list[x][rozmiar] / list[x][x]) return wektor def odejmoanieWarotsciWlasnej(list, wartoscWlasna): buff_list = copy.deepcopy(list) rozmiar = np.shape(list)[1] for i in range(rozmiar): for j in range(rozmiar): if i == j: buff_list[i][j] = list[i][j] - wartoscWlasna return buff_list def dodanieKolumnyZer(list, wartosciWlasne): wynik = {} rozmiar = np.shape(list)[1] zera = np.zeros((rozmiar, 1)) x = 0 for i in wartosciWlasne: wynik[x] = np.hstack((odejmoanieWarotsciWlasnej(list, i), zera)) x+=1 return wynik def wektoryWlasne(list, watosci_wlasne): macierze = dodanieKolumnyZer(list, watosci_wlasne) wektory = [] for i in macierze: macierze[i] = np.delete(macierze[i], len(macierze) - 1, 0) wektory.append((np.round(gaussJordan(macierze[i]) + [-1.], 3) * -1).tolist()) return wektory print("Wektory własne A2:") print(wektoryWlasne(A2, wartosciWlasne(A2))) A3 = np.array([[1,1,1,0,1,0,0,0], [1,1,1,0,-1,0,0,0], [1,1,-1,0,0,1,0,0], [1,1,-1,0,0,-1,0,0], [1,-1,0,1,0,0,1,0], [1,-1,0,1,0,0,-1,0], [1,-1,0,-1,0,0,0,1], [1,-1,0,-1,0,0,0,-1]]) def czyOrtogonalnaMacierz(macierz): macierz_buff = np.dot(np.transpose(macierz), macierz) x = np.count_nonzero(macierz_buff - np.diag(np.diagonal(macierz_buff))) if x == 0: return True else: return False def ortonormalizacja(macierz): macierz = np.transpose(macierz) macierz_buff = [] for i in macierz: dlugosc_wektora = math.sqrt(np.dot(i,i)) print(dlugosc_wektora) macierz_buff.append(i/dlugosc_wektora) macierz_wynik = np.dot(np.transpose(macierz_buff), macierz_buff) return macierz_buff, macierz_wynik # macierz_buff macierz ortonormalna macierz_wynik b* (b^-1) # print(czyOrtogonalnaMacierz(A3)) wektorA =np.array([8,6,2,3,4,6,6,5]) def Btr_przez_wektor_A(macierz ,wektorA): return np.dot(macierz, wektorA) # macierz_ortonormalna, jednostkowa = ortonormalizacja(A3) # print(np.round(jednostkowa,3)) # print("-------------------------------") # print(np.round(macierz_ortonormalna,2)) # print("-------------------------------") # print(np.round(Btr_przez_wektor_A(macierz_ortonormalna,wektorA), 3)) A4 = np.array([[1, 2,0], [2, 0, 2]]) def SVD(macierz): row, col = np.shape(macierz) if col >=row: AtA = np.dot(np.transpose(macierz),macierz) wartosci_wlanse = np.sort(np.round(np.linalg.eig(AtA)[0], col))[::-1] wekrory_v_bezdlugosci = wektoryWlasne(AtA, wartosci_wlanse) wektory_v = [] for i in range(0, col): dlugosc = np.round(math.sqrt(np.dot(wekrory_v_bezdlugosci[i],wekrory_v_bezdlugosci[i])),3) wektor = [x * 1/dlugosc for x in wekrory_v_bezdlugosci[i]] wektory_v.append(wektor) wektory_u = [] for j in range(0, row): if wartosci_wlanse[j] == 0: return "Nie da się obliczyć tym sposobem" else: wektor_u = np.dot(macierz,wektory_v[j]) * 1/math.sqrt(wartosci_wlanse[j]) wektory_u.append(wektor_u) E = np.zeros((row, col)) for y in range(0, row): E[y][y] = math.sqrt(wartosci_wlanse[y]) else: AAt = np.dot(macierz, np.transpose(macierz)) wartosci_wlanse = np.sort(np.round(np.linalg.eig(AAt)[0], row))[::-1] wekrory_u_bezdlugosci = wektoryWlasne(AAt, wartosci_wlanse) wektory_u = [] for i in range(0, row): dlugosc = np.round(math.sqrt(np.dot(wekrory_u_bezdlugosci[i],wekrory_u_bezdlugosci[i])),3) wektor = [x * 1/dlugosc for x in wekrory_u_bezdlugosci[i]] wektory_u.append(wektor) wektory_v = [] for j in range(0, col): if wartosci_wlanse[j] == 0: return "Nie da się obliczyć tym sposobem" else: wektor_v = 
np.dot(np.transpose(macierz), wektory_u[j]) * 1 / math.sqrt(wartosci_wlanse[j]) wektory_v.append(wektor_v) E = np.zeros((row, col)) for y in range(0, col): E[y][y] = math.sqrt(wartosci_wlanse[y]) print(E) Vt = np.zeros((col, col)) for x in range(0, col): for k in range(0, col): Vt[x][k] = wektory_v[x][k] U = np.zeros((row, row)) for x in range(0, row): for k in range(0, row): U[x][k] = wektory_u[x][k] U = np.transpose(U) return U, E, Vt U, E, Vt = SVD(A4) print("Macierz U") print(U) print("Macierz Epsilon") print(E) print("Macierz Vt") print(Vt)
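The file above builds its own SVD (QR iteration for the eigenvalues, then hand-assembled U, E and Vt). As an added, illustrative cross-check that is not part of the original file, the short NumPy snippet below verifies the properties any SVD of the same A4 matrix must satisfy: U E Vt reconstructs A, and U and Vt are orthogonal.

# Minimal sanity check for an SVD result (illustrative, NumPy only):
import numpy as np

A4 = np.array([[1, 2, 0],
               [2, 0, 2]], dtype=float)

# Reference decomposition from NumPy.
U_ref, s_ref, Vt_ref = np.linalg.svd(A4, full_matrices=True)
E_ref = np.zeros_like(A4)
E_ref[:len(s_ref), :len(s_ref)] = np.diag(s_ref)

# A valid SVD must reproduce A and have orthonormal U and Vt.
assert np.allclose(U_ref @ E_ref @ Vt_ref, A4)
assert np.allclose(U_ref @ U_ref.T, np.eye(U_ref.shape[0]))
assert np.allclose(Vt_ref @ Vt_ref.T, np.eye(Vt_ref.shape[0]))
print("reconstruction error:", np.linalg.norm(U_ref @ E_ref @ Vt_ref - A4))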
Tomasz-Wegrzynowski/MetodyInzWiedzy
main.py
main.py
py
16,554
python
pl
code
0
github-code
6
8099610278
from datetime import datetime
import os
# from dataclasses import dataclass
from sensor.constant.trainingPipeline_consts import *


class TrainingPipelineConfig:
    def __init__(self, timestamp=datetime.now()):
        timestamp = timestamp.strftime("%m_%d_%Y_%H_%M_%S")
        self.pipeline_name: str = PIPELINE_NAME
        self.artifact_dir: str = os.path.join(ARTIFACT_DIR, timestamp)
        self.timestamp: str = timestamp


class DataIngestionConfig:
    def __init__(self, training_pipeline_config: TrainingPipelineConfig):
        self.data_ingestion_dir: str = os.path.join(
            training_pipeline_config.artifact_dir, DATA_INGESTION_DIR_NAME
        )
        self.feature_store_file_path: str = os.path.join(
            self.data_ingestion_dir, DATA_INGESTION_FEATURE_STORE_DIR, FILE_NAME
        )
        self.training_file_path: str = os.path.join(
            self.data_ingestion_dir, DATA_INGESTION_INGESTED_DIR, TRAIN_FILE_NAME
        )
        self.testing_file_path: str = os.path.join(
            self.data_ingestion_dir, DATA_INGESTION_INGESTED_DIR, TEST_FILE_NAME
        )
        self.train_test_split_ratio: float = DATA_INGESTION_TRAIN_TEST_SPLIT_RATION
        self.collection_name: str = DATA_INGESTION_COLLECTION_NAME
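A minimal usage sketch of how the two config classes compose paths. The constant values below are hypothetical stand-ins (the real ones live in sensor.constant.trainingPipeline_consts), and the sketch assumes the classes are pasted into a standalone file with these stand-ins replacing the star-import.

# Illustrative only: stand-in constants so the config classes can be exercised
# outside the sensor package.
PIPELINE_NAME = "sensor"
ARTIFACT_DIR = "artifact"
DATA_INGESTION_DIR_NAME = "data_ingestion"
DATA_INGESTION_FEATURE_STORE_DIR = "feature_store"
DATA_INGESTION_INGESTED_DIR = "ingested"
FILE_NAME = "sensor.csv"
TRAIN_FILE_NAME = "train.csv"
TEST_FILE_NAME = "test.csv"
DATA_INGESTION_TRAIN_TEST_SPLIT_RATION = 0.2
DATA_INGESTION_COLLECTION_NAME = "sensor_readings"

training_pipeline_config = TrainingPipelineConfig()
data_ingestion_config = DataIngestionConfig(training_pipeline_config)
print(data_ingestion_config.training_file_path)
# e.g. artifact/<timestamp>/data_ingestion/ingested/train.csv

One caveat worth noting: the timestamp=datetime.now() default argument is evaluated once, when the class is defined, so every config created with the default shares the same timestamp.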
sverma1999/sensor-fault-detection
sensor/entity/config_entity.py
config_entity.py
py
1,269
python
en
code
1
github-code
6
70945141308
# Look up an address by neighborhood (dong) name
try:
    dong = input('동이름 입력 :')  # prompt: "enter a dong name"
    # print(dong)
    with open('zipcode.txt', mode='r', encoding='euc-kr') as f:
        line = f.readline()  # readline reads one line, readlines reads everything at once
        # print(line)
        while line:
            lines = line.split('\t')  # the field separator is a tab
            # print(lines)
            if lines[3].startswith(dong):
                # print(lines)
                print(lines[0] + ' ' + lines[1] + ' ' \
                      + lines[2] + ' ' + lines[3] + ' ' + lines[4])
            line = f.readline()
except Exception as e:
    print('err : ', e)
kangmihee/EX_python
pypro1/pack2/fio3.py
fio3.py
py
698
python
en
code
0
github-code
6
27260039230
"""We are the captain of our ships, and we stay 'till the end. We see our stories through. """ """290. Word Pattern """ class Solution: def wordPattern(self, pattern: str, str: str) -> bool: word_map, pattern_map = {}, {} words = str.split(" ") n = len(words) m = len(pattern) if m != n: return False for i in range(n): if pattern_map.get(pattern[i], -1) != word_map.get(words[i], -1): return False pattern_map[pattern[i]] = word_map[words[i]] = i return True
asperaa/back_to_grind
bactracking/290. Word Pattern.py
290. Word Pattern.py
py
582
python
en
code
1
github-code
6
73886305466
""" Author: Huang liuchao Contact: [email protected] Datetime: 2020/9/16 15:55 Software: PyCharm File description: """ import hlc_common_utils as hcu import onenet_warning_utils as owu import os import openpyxl from openpyxl import load_workbook import pandas as pd from pathlib import Path import win32com.client as win32 def test_files(dir_path): files_name = os.listdir(dir_path) # 得到所有文件的文件名 print (files_name) # file_path = os.path.join(dir_path, files_name) print('在设备-厂家-网元表的基础上,求出单一网元的唯一告警.........') print(file_path) def get_onenet_unique_warning(dir_path,to_dir_path): files_name=os.listdir(dir_path)#得到所有文件的文件名 for file_name in files_name: file_path=os.path.join(dir_path,file_name) print(file_path) get_onenet_unique_warning0(file_path,file_name,to_dir_path) print('全部单一告警统计完毕!') def get_onenet_unique_warning0 (file_path,file_name,to_dir_path): # files_name=os.listdir(dir_path)#得到所有文件的文件名 # file_path=os.path.join(dir_path,files_name) print('在设备-厂家-网元表的基础上,求出单一网元的唯一告警.........') # print(file_path) # for file in files_name: #读入文件 df = pd.read_excel(file_path, sheet_name=None) sheets_name = list(df) # 获得表头,为后边添加表头使用 # df_head = pd.read_excel(file_path, sheet_name=1) df_head_list = [['告警来源', '对象名称', '(告警)地市名称', '区县', '机房名称', '网元名称', '设备类型', '设备厂家', '网管告警级别', '告警标题', '厂家原始告警级别', '告警发生时间', '告警清除时间', '工单号', '派单所使用的规则', '派单状态', '未派单原因', '派单失败原因', '工单状态', '告警备注', '告警指纹fp0', '告警指纹fp1', '告警指纹fp2', '告警指纹fp3', '告警清除状态', '清除告警指纹fp0', '清除告警指纹fp1', '清除告警指纹fp2', '清除告警指纹fp3']] # print(df_head_list) #告警来源 对象名称 (告警)地市名称 区县 网元名称 设备厂家 网管告警级别 告警标题 厂家原始告警级别 告警发生时间 告警清除时间 工单号 派单所使用的规则 派单状态 未派单原因 派单失败原因 工单状态 告警备注 告警指纹fp0 告警指纹fp1 告警指纹fp2 告警指纹fp3 告警清除状态 清除告警指纹fp0 清除告警指纹fp1 清除告警指纹fp2 清除告警指纹fp3 remeber_nrows=[]#记录保存的行 nrows_values = [] for i in range (1,len(sheets_name)): onenet_list = [] others_list = [] #获取onenet_list和others_list for j in range(1,len(sheets_name)): if(i==j): df_onenet = pd.read_excel(file_path, sheet_name=i) # print('获取onenet_list') # print(sheets_name[5]) onenet_list=df_onenet['告警标题'].values.tolist() # print(onenet_list) else: df_others_net = pd.read_excel(file_path,sheet_name=j) # print('获取others_net_list') others_list += df_others_net['告警标题'].values.tolist() # if len(others_list)==0: # # break #对比onenet_list和others_list,求取行数 nrows=[] for j in range (len(onenet_list)): for k in range (len(others_list)): if onenet_list[j] not in others_list: nrows.append(j) remeber_nrows.append(j) nrows=list(set(nrows))#得到唯一告警的行数 remeber_nrows=list(set(remeber_nrows)) #根据行数开始写文件到第一个sheet中 if len(nrows)==0: print('%s网元不存在单一告警'%sheets_name[i]) else: print('将%s网元存在单一告警并将单一告警追加写入第一个sheet中'%sheets_name[i]) df_read_net = pd.read_excel(file_path, sheet_name=i) for n in range (len(nrows)): nrows_values.append(df_read_net.iloc[nrows[n]]) nrows_values= df_head_list+nrows_values df_write_net = pd.DataFrame(nrows_values) to_file_name = '单一告警'+file_name to_file_path = os.path.join(to_dir_path,to_file_name) df_write_net.to_excel(to_file_path,index=None,header=None) print('remeber_nrows=', remeber_nrows) # print(nrows_values) print('单一告警写入完毕') # print('onenet_list的长度',len(onenet_list)) # print('others_list的长度',len(others_list)) if __name__ == '__main__': #主函数测试用 abspath = os.path.abspath('../../../data') # 设置相对路径(基准路径) dir_path = abspath + r'\onenet_warning_data1\get_equipment_factory_netcell' to_dir_path = abspath + r'\onenet_warning_data1\get_onenet_unique_warning' get_onenet_unique_warning(dir_path,to_dir_path)
hlc0216/alarm_think
venv/Include/onenet_warning1/get_onenet_unique_warning.py
get_onenet_unique_warning.py
py
5,097
python
zh
code
1
github-code
6
35860574373
# Anything encountered around the map
objects = {
    # things to potentially find at different map locations
    "empty": {
        "desc": "nothing here"
    },
    "chest": {
        "desc": "a treasure chest full of valuables"
    },
    "enemy": {
        "desc": "some armed and hostile warriors"
    },
    "ally": {
        "desc": "some friendly locals who provide supplies"
    },
    "oasis": {
        "desc": "an oasis in the middle of the desert"
    }
}
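A hypothetical consumer of this lookup table (not in the repository), showing the intended access pattern objects[name]['desc']:

import random

def describe_random_encounter(objects):
    """Pick a random map object and return its description (illustrative helper)."""
    name = random.choice(list(objects))
    return "You found " + name + ": " + objects[name]["desc"]

print(describe_random_encounter(objects))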
bpoulin7/ben_p_rpg_map
objects.py
objects.py
py
479
python
en
code
0
github-code
6
24543736299
import sys

data = sys.stdin.read().strip()

sum = 0
for row in data.split('\n'):
    min = None
    max = None
    for value in row.split():
        value = int(value)
        if min is None or value < min:
            min = value
        if max is None or value > max:
            max = value
    sum += max - min

print('Sum:', sum)
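The same Advent of Code 2017 Day 2 checksum can be written with the per-row builtins; an illustrative rewrite (note the original shadows the sum/min/max builtins, which works but is easy to trip over):

import sys

rows = [list(map(int, row.split())) for row in sys.stdin.read().strip().split('\n')]
checksum = sum(max(r) - min(r) for r in rows)  # row difference, summed over rows
print('Sum:', checksum)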
jonaskrogell/adventofcode2017
2.py
2.py
py
335
python
en
code
0
github-code
6
23268756998
def check_true_matrix(column_, matrix_):
    for line in matrix_:
        if len(line) != column_:
            raise ValueError('Не правильно введены данные(Data entered incorrectly)')


if __name__ == '__main__':
    line_A, column_A = map(int, (input("Количество строк и столбцов матрицы A(Matrix Size A): ").split()))
    matrix_A = [
        list(map(int, input(f'Введите {i+1} строку матрицы А: ').split()))
        for i in range(line_A)
    ]
    check_true_matrix(column_A, matrix_A)

    line_B, column_B = map(int, (input("Количество строк и столбцов матрицы B(Matrix Size B): ").split()))
    matrix_B = [
        list(map(int, input(f'Введите {i+1} строку матрицы B: ').split()))
        for i in range(line_B)
    ]
    check_true_matrix(column_B, matrix_B)

    # A (line_A x column_A) can be multiplied by B (line_B x column_B) only when
    # column_A == line_B; the original compared column_A with line_A, which is a bug.
    if column_A != line_B:
        raise ValueError('Не правильно введены данные(Data entered incorrectly)')

    new_matrix_multiplication = [
        [0] * column_B for _ in range(line_A)
    ]
    for line_first_matrix in range(line_A):
        for column_second_matrix in range(column_B):
            a1 = matrix_A[line_first_matrix]
            a2 = [matrix_B[i][column_second_matrix] for i in range(line_B)]
            new_matrix_multiplication[line_first_matrix][column_second_matrix] = sum(
                a1[i] * a2[i] for i in range(len(a1))
            )

    for k in new_matrix_multiplication:
        print(*k)
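A fixed-input cross-check of the same row-by-column product against NumPy (illustrative; the original reads its matrices from input()):

import numpy as np

A = [[1, 2, 3],
     [4, 5, 6]]          # 2 x 3
B = [[7, 8],
     [9, 10],
     [11, 12]]           # 3 x 2

# Same triple loop, written as a comprehension.
C = [[sum(A[i][k] * B[k][j] for k in range(len(B))) for j in range(len(B[0]))]
     for i in range(len(A))]

assert (np.array(C) == np.array(A) @ np.array(B)).all()
print(C)  # [[58, 64], [139, 154]]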
Salazhiev/CalculatorMatrix
multiplication_matrix.py
multiplication_matrix.py
py
1,479
python
en
code
0
github-code
6
36321955212
import os
import re
import sys
import glob
import shutil
import pdftotext


def extract_Text_pdf(pdfdir):
    print("Starting Text Extraction for pdf files......")
    number_of_files = str(len([item for item in os.listdir(pdfdir) if os.path.isfile(os.path.join(pdfdir, item))]))
    print("Processing (" + number_of_files + ") .pdf files.....")
    os.chdir(pdfdir)
    file_list2 = []
    for filename in glob.glob("*.pdf"):
        # Get the filename without the extension for naming later
        base = os.path.basename(filename)
        filenameNoExt = os.path.splitext(base)[0]
        # Create a list of the text files
        file_list2.append("pdf_" + filenameNoExt + ".txt")
        with open(filename, "rb") as f:
            pdf = pdftotext.PDF(f)
        filecontents = re.sub(' +', ' ', " ".join(pdf).replace("\n", " ").strip())
        # Remove Non ASCII characters
        filecontents2 = re.sub(r'[^\x00-\x7f]', r'', filecontents)
        # content_list = list(filter(None, content_list))
        with open("pdf_" + filenameNoExt + ".txt", "a") as fp1:
            fp1.write(filecontents2)
            fp1.close()
    print("Text extraction completed for (" + number_of_files + ") .pdf files ********************")


pdf_files = 'to_process/'
extract_Text_pdf(pdf_files)
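The per-file logic, isolated for a single PDF and reusing only the calls already shown above (pdftotext.PDF on a binary file handle, then joining its pages). This is a sketch; 'sample.pdf' is a hypothetical input path.

import re
import pdftotext

def pdf_to_clean_text(path):
    with open(path, "rb") as f:
        pages = pdftotext.PDF(f)
    text = " ".join(pages).replace("\n", " ").strip()
    text = re.sub(' +', ' ', text)            # collapse repeated spaces
    return re.sub(r'[^\x00-\x7f]', '', text)  # drop non-ASCII characters

print(pdf_to_clean_text("sample.pdf")[:200])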
mstatt/Udemy_HighSpeedDataAnalysis
3_PDF_Text_Extraction/pdf_text_extraction.py
pdf_text_extraction.py
py
1,272
python
en
code
2
github-code
6
25002203831
from osv import fields, osv


class copy_verification_lines(osv.osv_memory):
    """ Copy Verification Lines """
    _name = "copy.verification.lines"
    _description = "Copy Verification Lines"
    _columns = {
        'audit_src': fields.many2one('mgmtsystem.audit', 'Choose audit'),
    }

    def copy(self, cr, uid, ids, context=None):
        # Code to copy verification lines from the chosen audit to the current one
        if context is None:
            context = {}
        audit_proxy = self.pool.get(context.get('active_model'))
        verification_line_proxy = self.pool.get('mgmtsystem.verification.line')
        src_id = self.read(cr, uid, ids, [], context=context)[0]['audit_src']
        for line in audit_proxy.browse(cr, uid, src_id, context=context).line_ids:
            verification_line_proxy.create(cr, uid, {
                'seq': line.seq,
                'name': line.name,
                'audit_id': context['active_id'],
                'procedure_id': line.procedure_id.id,
                'is_conformed': False,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}


copy_verification_lines()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
factorlibre/openerp-extra-6.1
mgmtsystem_audit/wizard/copy_verification_lines.py
copy_verification_lines.py
py
1,235
python
en
code
9
github-code
6
28839405440
''' Eduardi Cruz DIV B Ejercicio 03 Es la gala final de Gran Hermano y la producción nos pide un programa para contar los votos de los televidentes y saber cuál será el participante que ganará el juego. Los participantes finalistas son: Nacho, Julieta y Marcos. El televidente debe ingresar: ● Nombre del votante ● Edad del votante (debe ser mayor a 13) ● Género del votante (masculino, femenino, otro) ● El nombre del participante a quien le dará el voto positivo. No se sabe cuántos votos entrarán durante la gala. Se debe informar al usuario: A. El promedio de edad de las votantes de género femenino B. Cantidad de personas de género masculino entre 25 y 40 años que votaron a Nacho o Julieta. C. Nombre del votante más joven que votó a Nacho. D. Nombre de cada participante y porcentaje de los votos qué recibió. E. El nombre del participante que ganó el reality (El que tiene más votos) ''' def mostrar(): respuesta = "si" acu_edad_femenino = 0 con_edad_femenino = 0 con_nacho_julieta = 0 con_votos_total = 0 con_votos_nacho = 0 con_votos_julieta = 0 con_votos_marcos = 0 porcentaje_nacho = 0 porcentaje_julieta = 0 porcentaje_marcos = 0 msj_porcentaje_nacho = "" msj_porcentaje_julieta = "" msj_porcentaje_marcos = "" ban_votante_mas_joven = True votante_mas_joven_nombre = "" votante_mas_joven_edad = 0 msj_votante_mas_joven = "" participante_ganador = "" while respuesta == "si": nombre_votante = input("Ingrese su nombre: ") edad_votante = int(input("Ingrese su edad: ")) while edad_votante < 14: edad_votante = int(input("ERROR: Reingrese su edad: ")) genero_votante = input("Ingrese genero: masculino, femenino, nobinario ") while genero_votante != "masculino" and genero_votante != "femenino" and genero_votante != "nobinario": genero_votante = input("ERROR: Reingrese genero: masculino, femenino, nobinario ") nombre_participante = input("Ingrese nombre del participante: Nacho, Julieta y Marcos ") while nombre_participante != "Nacho" and nombre_participante != "Julieta" and nombre_participante != "Marcos": nombre_participante = input("ERROR: Reingrese nombre del participante: Nacho, Julieta y Marcos ") respuesta = input("Quiere seguir votando? ") # A. El promedio de edad de las votantes de género femenino if genero_votante == "femenino": acu_edad_femenino = acu_edad_femenino + edad_votante con_edad_femenino += 1 # B. Cantidad de personas de género masculino entre 25 y 40 años que votaron a Nacho o Julieta. if genero_votante == "masculino": if (edad_votante > 24 and edad_votante < 41) and (nombre_participante == "Nacho" or nombre_participante == "Julieta"): con_nacho_julieta += 1 # D. Nombre de cada participante y porcentaje de los votos qué recibió. con_votos_total = con_votos_total + 1 # match nombre_participante: # case "Nacho": # con_votos_nacho = con_votos_nacho + 1 # # C. Nombre del votante más joven que votó a Nacho. # if ban_votante_mas_joven == True or edad_votante < votante_mas_joven_edad: # votante_mas_joven_nombre = nombre_votante # votante_mas_joven_edad = edad_votante # ban_votante_mas_joven = False # case "Julieta": # con_votos_julieta = con_votos_julieta + 1 # case "Marcos": # con_votos_marcos = con_votos_marcos + 1 if nombre_participante == "Nacho": con_votos_nacho = con_votos_nacho + 1 # C. Nombre del votante más joven que votó a Nacho. 
if ban_votante_mas_joven == True or edad_votante < votante_mas_joven_edad: votante_mas_joven_nombre = nombre_votante votante_mas_joven_edad = edad_votante ban_votante_mas_joven = False elif nombre_participante == "Julieta": con_votos_julieta = con_votos_julieta + 1 else: con_votos_marcos = con_votos_marcos + 1 #FIN WHILE # A. PROMEDIO FEMENINO if con_edad_femenino > 0: promedio_femenino = acu_edad_femenino / con_edad_femenino else: promedio_femenino = "NO HUBO" # D. Nombre de cada participante y porcentaje de los votos qué recibió. # TOTAL ______ 100% # 10 PERSONA ______ X = 20 * 100 / TOTAL = % if con_votos_nacho > 0: porcentaje_nacho = con_votos_nacho * 100 / con_votos_total msj_porcentaje_nacho = f"Porcentaje de votos de Nacho: {porcentaje_nacho}%" # C. Nombre del votante más joven que votó a Nacho. msj_votante_mas_joven = f"El votante mas joven que voto a nacho: {votante_mas_joven_nombre} {votante_mas_joven_edad}" else: msj_porcentaje_nacho = "Porcentaje de votos de Nacho: 0" msj_votante_mas_joven = "El votante mas joven que voto a nacho: NO HUBO" if con_votos_julieta > 0: porcentaje_julieta = con_votos_julieta * 100 / con_votos_total msj_porcentaje_julieta = f"Porcentaje de votos de Julieta: {porcentaje_julieta}&" else: msj_porcentaje_julieta = "Porcentaje de votos de Julieta: 0" if con_votos_marcos > 0: porcentaje_marcos = con_votos_marcos * 100 / con_votos_total msj_porcentaje_marcos = f"Porcentaje de votos de Marcos: {porcentaje_marcos}%" else: msj_porcentaje_marcos = "Porcentaje de votos de Marcos: 0" # E. El nombre del participante que ganó el reality (El que tiene más votos) if con_votos_marcos > con_votos_nacho and con_votos_marcos > con_nacho_julieta: participante_ganador = "El ganador es Marcos" elif con_votos_nacho > con_votos_julieta: participante_ganador = "El ganador es Nacho" else: participante_ganador = "El ganador es Julieta" print(f"El promedio de mujeres que votaron: {promedio_femenino}") print(f"Personas entre 25-40 masculinos que votaron a Nacho y Julieta: {con_nacho_julieta}") print(msj_porcentaje_nacho) print(msj_porcentaje_julieta) print(msj_porcentaje_marcos) print(msj_votante_mas_joven) print(participante_ganador) mostrar()
EduardoCruzfm/UTN
programacion_1/ejercicios_phyton/ejercicio_03.py
ejercicio_03.py
py
6,445
python
es
code
0
github-code
6
32584103329
import dash_core_components as dcc import dash_html_components as html import plotly.express as px from dash.dependencies import Input, Output import dash_bootstrap_components as dbc from app import app from apps import theme_explorer as te, text import util """ ===================================================================== Helper functions and components """ df = px.data.gapminder() code = util.get_code_file("dash_bootstrap_templates_app.py") copy_code_div = util.get_copy_code_div(code, id="copy_template_code") # make control panel use_templates = dbc.RadioItems( options=[ {"label": "Use figure templates from dash-bootstrap-templates", "value": 1}, {"label": "Use Plotly default figure template", "value": 2}, ], value=1, id="use_figure_template", ) control_panel_text = dcc.Markdown( text.dash_bootstrap_templates_text, className="border mb-5 p-4" ) # needed because the theme dropdown also updates "css" on Theme Explorer page but not here dummy_output = html.Div(id="css", className='d-none') control_panel = [control_panel_text, te.boostrap_card, use_templates, dummy_output] carousel = dbc.Carousel( ride="carousel", items=[ { "key": "1", "src": "https://user-images.githubusercontent.com/72614349/129459807-30c22ffe-7a8c-44b9-9555-6cfd50ec355b.png", }, { "key": "2", "src": "https://user-images.githubusercontent.com/72614349/129459808-40032148-82e1-47ce-a49a-05e598c69400.png", }, ], ) carousel_text = dcc.Markdown(text.dash_bootstrap_templates_app_text) """ =============================================================================== Layout """ layout = dbc.Container( [ util.header, dbc.Row( [ dbc.Col(control_panel, lg=4, sm=12), dbc.Col( html.Div( id="db_templates_sample_app", className="mx-1 mb-4 shadow p-4", ), lg=8, sm=12, ), ], ), dbc.Row( [ dbc.Col([carousel_text, carousel], lg=4, sm=12), dbc.Col(html.Div(copy_code_div,), lg=8, sm=12,), ], ), ], fluid=True, id="bootstrap_templates", ) """ ===================================================================== Display Sample App based on theme selected """ @app.callback( Output("db_templates_sample_app", "children"), Input("themes", "value"), Input("use_figure_template", "value"), ) def update_graphs(theme, use_template): template = util.url_dbc_themes[theme].lower() if use_template == 1 else {} heading_txt = ( "App with dash-bootstrap-templates" if use_template == 1 else "App with Plotly default figure template" ) heading = html.H3(heading_txt, className="bg-primary text-white p-2") dff = df[df.year.between(1952, 1982)] dff = dff[dff.continent.isin(df.continent.unique()[1:])] line_fig = px.line( dff, x="year", y="gdpPercap", color="continent", line_group="country", template=template, ) dff = dff[dff.year == 1982] scatter_fig = px.scatter( dff, x="lifeExp", y="gdpPercap", size="pop", color="pop", size_max=60, template=template, ).update_traces(marker_opacity=0.8) avg_lifeExp = (dff["lifeExp"] * dff["pop"]).sum() / dff["pop"].sum() map_fig = px.choropleth( dff, locations="iso_alpha", color="lifeExp", title="%.0f World Average Life Expectancy was %.1f years" % (1982, avg_lifeExp), template=template, ) hist_fig = px.histogram( dff, x="lifeExp", nbins=10, title="Life Expectancy", template=template ) graph_height = 300 graphs = html.Div( [ dbc.Row( [ dbc.Col( dcc.Graph(figure=line_fig, style={"height": graph_height}), lg=6 ), dbc.Col( dcc.Graph(figure=scatter_fig, style={"height": graph_height}), lg=6, ), ], className="mt-4", ), dbc.Row( [ dbc.Col( dcc.Graph(figure=hist_fig, style={"height": graph_height}), lg=6 ), dbc.Col( 
dcc.Graph(figure=map_fig, style={"height": graph_height}), lg=6 ), ], className="mt-4", ), ] ) # These buttons are added to the app just to show the Boostrap theme colors buttons = html.Div( [ dbc.Button("Primary", color="primary", className="mr-1"), dbc.Button("Secondary", color="secondary", className="mr-1"), dbc.Button("Success", color="success", className="mr-1"), dbc.Button("Warning", color="warning", className="mr-1"), dbc.Button("Danger", color="danger", className="mr-1"), dbc.Button("Info", color="info", className="mr-1"), dbc.Button("Light", color="light", className="mr-1"), dbc.Button("Dark", color="dark", className="mr-1"), dbc.Button("Link", color="link"), ], ) return [heading, buttons, graphs] @app.callback( Output("bootstrap_templates", "className"), Input("light_dark", "value"), ) def update_css(value): return "dbc_light" if value == "Light Themes" else "dbc_dark"
thigbee/dashBootstrapThemeExplorer
apps/bootstrap_templates.py
bootstrap_templates.py
py
5,729
python
en
code
0
github-code
6
41244848850
'''
Develop a program that reads the name, age and sex of 4 people. At the end, show:
* The average age of the group.
* The name of the oldest man.
* How many women are under 21 years old.
'''

soma = 0
total = 0
maioridadehomem = 0
nomevelho = ''
totmulher20 = 0

for pessoa in range(1, 5):
    nome = str(input('Digite o nome: '))
    idade = int(input('Digite a idade: '))
    sexo = str(input('Masculo ou Femino? '))
    soma = soma + idade
    total = total + 1
    media = soma / total
    if pessoa == 1 and sexo in 'Mm':  # the original's 'Mn' looks like a typo for 'Mm'
        maioridadehomem = idade
        nomevelho = nome
    if sexo in 'Mm' and idade > maioridadehomem:
        maioridadehomem = idade
        nomevelho = nome
    if sexo in 'Ff' and idade < 20:
        totmulher20 = totmulher20 + 1

print('A média de idade do grupo é de {} anos'.format(media))
print('O homem mais velho tem {} anos e se chama {}'.format(maioridadehomem, nomevelho))
print('O total de mulheres com menos de 20 anos é de {} Mulheres'.format(totmulher20))
andrematos90/Python
CursoEmVideo/Módulo 2/Desafio 056.py
Desafio 056.py
py
1,040
python
pt
code
0
github-code
6
8012099265
#!/usr/bin/env python3 import matplotlib.pyplot as plt import numpy as np import argparse parser = argparse.ArgumentParser() parser.add_argument("file", type=str, help="data file") parser.add_argument("-R", "--rd", type=float, default=1e3, help="resistor on drain") parser.add_argument("-D", "--diagnose", action="store_true", help="plot aux polynomial") parser.add_argument("-I", "--init", type=float, default=10, help="iterative seed") parser.add_argument("-G", "--gain", type=float, default=25, help="gain desired (dB)") parser.add_argument("-P", "--plot", action="store_true", help="plot fitted polynomial") args = parser.parse_args() try: data = np.genfromtxt(args.file) except OSError: print("File {0:s} does not exist".format(args.file)) exit() vgs_data, id_data = data[:, 0], data[:, 1] if args.diagnose: vx = np.linspace(0, 10, 1000) vy1 = [((vgs_data - v) @ id_data - np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) / np.sum(np.power(vgs_data - v, 4))) for v in vx] #vy2 = [((vgs_data @ id_data - # np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) / # np.sum(np.power(vgs_data - v, 4))) / np.sum(id_data)) # for v in vx] #vy3 = [(vgs_data @ id_data - np.sum(id_data) * v + # np.square(vgs_data - v) @ id_data * np.sum(np.power(vgs_data - v, 3)) / # np.sum(np.power(vgs_data - v, 4)))] plt.plot(vx, vy1, color="r") #plt.plot(vx, vy2, color="g") #plt.plot(vx, vy3, color="b") plt.xlabel(r"$v_t$") plt.ylabel(r"$p(v_t$)") plt.grid() plt.show() exit() # Biseccion va, vt = 0, 0 vb = args.init while not np.isclose(va, vb, rtol=10 * np.finfo(float).eps, atol=0.0): vt = (va + vb) / 2 d = ((vgs_data - vt) @ id_data - (np.square(vgs_data - vt) @ id_data / np.sum(np.power(vgs_data - vt, 4))) * np.sum(np.power(vgs_data - vt, 3))) if d > 0: vb = vt elif d < 0: va = vt else: va = vb k = id_data @ np.square(vgs_data - vt) / np.sum(np.power(vgs_data - vt, 4)) k2 = id_data @ (vgs_data - vt) / np.sum(np.power(vgs_data - vt, 3)) if not np.isclose(k, k2, rtol=10 * np.finfo(float).eps, atol=0.0): print("fit invalid, try a different seed estimated from aux polynomial") exit() gain_target = 10 ** (args.gain / 20) vgsq = gain_target / (2 * k * args.rd) + vt idq = k * (vgsq - vt) ** 2 print("k = {:3.3f} mA/V^2".format(k * 1000)) print("Vt = {:2.3f} V\n".format(vt)) print("Gain = {} dB".format(args.gain)) print("Rd = {} ohm".format(args.rd)) print("Vgsq = {:3.3} V".format(vgsq)) print("Idq = {:2.4} mA".format(1000 * idq)) if args.plot: vgs = np.linspace(vgs_data[0], vgs_data[len(vgs_data) - 1], 1000) i_d = k * np.square(vgs - vt) plt.title(r"$K = {0:3.3f} mA/V^2, V_t = {1:2.3f}V$".format(k * 1000, vt)) plt.xlabel(r"$v_{gs}$") plt.ylabel(r"$i_d$") plt.grid() plt.plot(vgs_data, id_data, marker="o", linestyle=" ") plt.plot(vgs, i_d, linestyle="-") plt.show()
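The script above fits the square-law MOSFET model i_d = K*(v_gs - V_t)^2 by bisection on an auxiliary polynomial. As a standalone illustration of why this model is easy to fit (not the script's own method): on noise-free data sqrt(i_d) is linear in v_gs, so a degree-1 polyfit recovers K and V_t. The constants below are made up for the demo.

import numpy as np

K_true, Vt_true = 2e-3, 1.2
vgs = np.linspace(1.5, 3.0, 20)
i_d = K_true * (vgs - Vt_true) ** 2

# sqrt(i_d) = sqrt(K) * (v_gs - V_t): slope = sqrt(K), intercept = -sqrt(K) * V_t
slope, intercept = np.polyfit(vgs, np.sqrt(i_d), 1)
K_est = slope ** 2
Vt_est = -intercept / slope
print("K  = {:.3f} mA/V^2 (true {:.3f})".format(K_est * 1e3, K_true * 1e3))
print("Vt = {:.3f} V (true {:.3f})".format(Vt_est, Vt_true))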
mvallina/trts
nmosfit.py
nmosfit.py
py
3,076
python
en
code
1
github-code
6
35003317168
# Домашняя работа по задаче 2.4 курса Python 3 # Задача 2.4. (Условия) # Пункт A. # Напишите функцию, которая удаляет все восклицательные знаки из заданной строк. # Например, # foo("Hi! Hello!") -> "Hi Hello" # foo("") -> "" # foo("Oh, no!!!") -> "Oh, no" # def remove_exclamation_marks(s): # pass # Пункт B. # Удалите восклицательный знак из конца строки. # remove("Hi!") == "Hi" # remove("Hi!!!") == "Hi!!" # remove("!Hi") == "!Hi" # def remove_last_em(s): # pass # Дополнительно # Пункт С. # Удалите слова из предложения, если они содержат ровно один восклицательный знак. # Слова разделены одним пробелом. # Например, # remove("Hi!") === "" # remove("Hi! Hi!") === "" # remove("Hi! Hi! Hi!") === "" # remove("Hi Hi! Hi!") === "Hi" # remove("Hi! !Hi Hi!") === "" # remove("Hi! Hi!! Hi!") === "Hi!!" # remove("Hi! !Hi! Hi!") === "!Hi!" # def remove_word_with_one_em(s): # pass print('\n') # Пустая строка для разделения вывода в консоли # Напишем проверочный текст (в переменную) text = '! Это - п!роверочный текст, для! !проверки !работы! ф-й дл!я удаления восклицательных!! знаков!!!' print('Полный текст для сравнения с результатами:', '\n\n', text) # Для наглядности выведем его в консоль # Решение п.А. print('\n', 'Решение А. (убраны все "!")') # Пустая строка для разделения вывода в консоли def remove_exclamation_marks(text: str) -> str: '''Удаляет все "!" из строки''' return text.replace('!', '') # Проверка работы ф-и (п. А.) print(remove_exclamation_marks(text)) # Решение п.B. print('\n', 'Решение B. (убран последний "!")') # Пустая строка для разделения вывода в консоли def remove_last_em(text: str) -> str: '''Убирает "!" в конце строки, если он есть.''' if text[-1] == "!": return text[:-1] else: return text # Проверка работы ф-и (п. А.) print(remove_last_em(text)) # Решение 2. п.C. # Т.к. после Решения 1 я сообразил, что Сплит делает именно список, # Подумал, что сразу через него будет короче. Ошибся :). # Решил, что надо как-то иначе написать код (чтобы не повторять Решение 1 по структуре) print('\n', 'Решение C2. (убраны все слова с 1 "!")') # Пустая строка для разделения вывода в консоли def remove_word_with_one_em(text: str) -> str: '''Удаляет слово из строки если оно содержит ровно один "!"''' word_list = text.split(' ') # получаем текст в виде списка слов. word_count = len(word_list) # определяем максимальное кол-во слов count = 0 # счетчик слов while count < word_count: # проходим по каждому слову в тексте symbol_count = 0 # задаем счетчик для "!" word = word_list[count] # получаем каждое слово для проверки for letter in range(len(word)): # Проходим по каждому символу слова if word[letter] == '!': symbol_count +=1 # Считаем "!" if symbol_count == 1: del word_list[count] # удаляем слово из списка по условию word_count -= 1 # уменьшаем кол-во слов в списке count -= 1 # т.к. индекс должен остаться прежним при удалении элемента count += 1 return ' '.join(word_list) # Проверка Решения 2. п.С print(remove_word_with_one_em(text)) # Конец проверки. Конец Решения 2. # Решение 3. п.C. # Оптимизированное Решение 1. (через enumerate) [меньше строк и действий] # И сразу ч-з 1 список, не прописывая каждый раз все через split print('\n', 'Решение C3. (убраны все слова с 1 "!")') # Пустая строка для разделения вывода в консоли def _remove_word_with_one_em(text: str) -> str: '''Удаляет слово из строки если оно содержит ровно один "!"''' new_text = '' # будущая возвращенная строка word_list = text.split(' ') # получаем текст в виде списка слов. 
for word in word_list: # проходим по каждому слову в тексте (списке) symbol_count = 0 # задаем счетчик для "!" for letter in range(len(word)): # Проходим по каждому символу слова if word[letter] == '!': symbol_count +=1 # Считаем "!" if symbol_count != 1: new_text += word + ' ' # собираем текст заново return new_text.strip(' ') # С удалением последнего пробела # Проверка решения 3. п.С print(_remove_word_with_one_em(text)) # Конец решения 3. print('\n') # Пустая строка для разделения вывода в консоли
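The homework above implements three exclamation-mark tasks (remove all '!', remove a trailing '!', drop words containing exactly one '!'). The compact alternatives below are illustrative, not the author's submitted solutions; the expected outputs come from the assignment text itself.

def remove_exclamation_marks(s):
    return s.replace('!', '')

def remove_last_em(s):
    return s[:-1] if s.endswith('!') else s

def remove_word_with_one_em(s):
    return ' '.join(w for w in s.split(' ') if w.count('!') != 1)

print(remove_exclamation_marks("Oh, no!!!"))    # "Oh, no"
print(remove_last_em("Hi!!!"))                  # "Hi!!"
print(remove_word_with_one_em("Hi! Hi!! Hi!"))  # "Hi!!"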
PavelVes/project_01
HomeWorks_for_course_Python_3/HW_lvl_1/hw_task_2.4.py
hw_task_2.4.py
py
6,215
python
ru
code
0
github-code
6
4406068121
import random import operator class Node(): def __init__(self, val): self.val = val self.next = None def make_linklist(datas): head, tail = None, None for d in datas: node = Node(d) if not head: head = node tail = node else: tail.next = node tail = node return head def dump_to_list(head): l = [] p = head while p: l.append(p.val) p = p.next return l def insert_sort(head): if not head or not head.next: return head cur = head.next head.next = None while cur: next = cur.next cur.next = None prev = None p = head while p and p.val <= cur.val: prev = p p = p.next if not prev: cur.next = head head = cur else: prev.next = cur cur.next = p cur = next return head def get_smallest_node(head): small_pre = None small = head pre = head cur = head.next while cur: if cur.val < small.val: small_pre = pre small = cur pre = cur cur = cur.next return small_pre, small def selection_sort(head): if not head or not head.next: return head tail = None cur = head while cur: small_prev, small = get_smallest_node(cur) if small_prev: small_prev.next = small.next if cur == small: cur = cur.next small.next = None if not tail: head = small tail = small else: tail.next = small tail = small return head def get_biggest_node(head): big_pre = None big = head pre = head cur = head.next while cur: if cur.val > big.val: big_pre = pre big = cur pre = cur cur = cur.next return big_pre, big def selection_sort2(head): if not head or not head.next: return head new_head = None while head: big_prev, big = get_biggest_node(head) if big_prev: big_prev.next = big.next if head == big: head = head.next big.next = new_head new_head = big return new_head def test(count, maxval): datas = [] for _ in range(count): r = random.randint(0, maxval) datas.append(r) head = make_linklist(datas) head = insert_sort(head) l = dump_to_list(head) if not operator.eq(sorted(datas), l): raise Exception('Error') head = make_linklist(datas) head = selection_sort(head) l = dump_to_list(head) if not operator.eq(sorted(datas), l): raise Exception('Error') head = make_linklist(datas) head = selection_sort2(head) l = dump_to_list(head) if not operator.eq(sorted(datas), l): raise Exception('Error') if __name__ == '__main__': test(0, 100) test(1, 100) test(2, 100) test(10, 100) test(10, 100) test(100, 100) test(1000, 100) test(1000, 10000)
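A short usage example (added for illustration) for the linked-list helpers defined in the file above; all three sort variants should produce the same ascending order.

data = [5, 2, 9, 1, 5, 6]

head = make_linklist(data)
print(dump_to_list(insert_sort(head)))      # [1, 2, 5, 5, 6, 9]

head = make_linklist(data)
print(dump_to_list(selection_sort(head)))   # [1, 2, 5, 5, 6, 9]

head = make_linklist(data)
print(dump_to_list(selection_sort2(head)))  # [1, 2, 5, 5, 6, 9]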
guzhoudiaoke/data_structure_and_algorithms
coding_interview_guide/2_link_list/16_selection_sort/selection_sort.py
selection_sort.py
py
3,152
python
en
code
0
github-code
6
74190873788
__author__ = "ALEX-CHUN-YU ([email protected])" from sklearn.datasets import load_wine from sklearn.ensemble import RandomForestClassifier from sklearn import preprocessing from sklearn.model_selection import validation_curve from sklearn.model_selection import GridSearchCV from sklearn_evaluation.plot import grid_search import numpy as np import matplotlib.pyplot as plt from sklearn.externals import joblib import json # Random Forest Classifier Alogorithm class RFC(): # RFC Initialize def __init__(self, name): self.model_name = 'model/' + name + '_rfc' self.image_name = 'image/' + name + '_rfc' # RFC Parameter self.n_estimators = 10 self.criterion = 'gini' self.max_depth = None self.min_samples_split = 2 self.min_samples_leaf = 1 self.min_weight_fraction_leaf = 0.0 self.max_features = 'auto' self.max_leaf_nodes = None self.min_impurity_decrease = 0.0 self.min_impurity_split = None self.bootstrap = True self.oob_score = False self.n_jobs = -1 self.random_state = None self.verbose = 0 self.warm_start = False self.class_weight = None #(Validation Parameter) GridSearchCV, validation_curve self.cv = 10 self.criterion_range = ['gini', 'entropy'] # 2 * 3 self.max_features_range = ['sqrt', 'log2', 'auto'] self.n_estimators_range = [10, 50, 100, 700, 1000]# 5 # Accuracy(GridSearchCV application) self.score = 0 self.scoring = 'accuracy'# f1、recall、 precision, your target must binary in sklearn(但貼心的 sklearn 還是有提供 f1_micro、f1_macro...) # Normalization self.normalization = False # Find Best Parameter(RFC 有沒有 normalization 都沒差? 暫且留著) def tuning_parameters(self, X, y): # 第一次 tuning (找出 best n_estimators 和 best max_features) # n_estimators 叢林中要有幾顆樹(default = 10) # criterion 計算資訊量的的方式(劃分樹分支時所需要的), gini 或 entropy(default = 'gini') # max_features 選擇最適合屬性時劃分的特徵不能超過此值 clf = RandomForestClassifier(n_estimators = self.n_estimators, criterion = self.criterion, max_depth = self.max_depth, min_samples_split = self.min_samples_split, min_samples_leaf = self.min_samples_leaf, min_weight_fraction_leaf = self.min_weight_fraction_leaf, max_features = self.max_features, max_leaf_nodes = self.max_leaf_nodes, min_impurity_decrease = self.min_impurity_decrease, min_impurity_split = self.min_impurity_split, bootstrap = self.bootstrap, oob_score = self.oob_score, n_jobs = self.n_jobs, random_state = self.random_state, verbose = self.verbose, warm_start = self.warm_start, class_weight = self.class_weight) parameter_candidates = {# Set the parameter candidates 'n_estimators': self.n_estimators_range, 'criterion': self.criterion_range, 'max_features': self.max_features_range} clf_gscv = GridSearchCV(estimator = clf, param_grid = parameter_candidates, cv = self.cv, scoring = self.scoring, n_jobs = self.n_jobs)# Create a classifier with the parameter candidates clf_gscv.fit(X, y)# No Normalization normalization_clf_gscv = clf_gscv normalization_clf_gscv.fit(preprocessing.scale(X), y)# Normalization if normalization_clf_gscv.best_score_ > clf_gscv.best_score_: self.normalization = True X = preprocessing.scale(X) self.n_estimators = normalization_clf_gscv.best_estimator_.n_estimators self.criterion = normalization_clf_gscv.best_estimator_.criterion self.max_features = normalization_clf_gscv.best_estimator_.max_features self.score = normalization_clf_gscv.best_score_ clf = normalization_clf_gscv else: self.n_estimators = clf_gscv.best_estimator_.n_estimators self.criterion = clf_gscv.best_estimator_.criterion self.max_features = clf_gscv.best_estimator_.max_features self.score = clf_gscv.best_score_ clf = clf_gscv # # Print out the 
results # print('Best score for training data:', clf_gscv.best_score_) # print('Best n_estimators:',clf_gscv.best_estimator_.n_estimators) # print('Best max_features:',clf_gscv.best_estimator_.max_features) # print(normalization_clf_gscv.best_score_) # print(clf.cv_results_['params']) criterion = [x['criterion'] for x in clf.cv_results_['params']] # print(criterion) max_features = [x['max_features'] for x in clf.cv_results_['params']] # print(max_features) plt.title("Validation Curve with RFC") plt.xlabel("Value Of n_estimators For RFC") plt.ylabel(self.scoring) # 6 * 5 mean_scores = np.array(clf.cv_results_['mean_test_score']).reshape(len(self.criterion_range) * len(self.max_features_range), len(self.n_estimators_range)) std_scores = np.array(clf.cv_results_['std_test_score']).reshape(len(self.criterion_range) * len(self.max_features_range), len(self.n_estimators_range)) # print(mean_scores) # print(std_scores) ind = 0 for i in range(0, len(criterion), len(self.n_estimators_range)): plt.plot(self.n_estimators_range, mean_scores[ind], "-o", label = 'criterion: ' + criterion[i] + ', max_features: ' + max_features[i]) plt.fill_between(self.n_estimators_range, mean_scores[ind] - std_scores[ind], mean_scores[ind] + std_scores[ind], alpha = 0.2) ind += 1 plt.legend(loc = "best") # best location plt.savefig(self.image_name + '.png')# save image plt.close() print("RFC Save Image Finished") print("RFC Tuning Parameters Finished") # Produce Model def train(self, X, y): # Train clf = RandomForestClassifier(n_estimators = self.n_estimators, criterion = self.criterion, max_depth = self.max_depth, min_samples_split = self.min_samples_split, min_samples_leaf = self.min_samples_leaf, min_weight_fraction_leaf = self.min_weight_fraction_leaf, max_features = self.max_features, max_leaf_nodes = self.max_leaf_nodes, min_impurity_decrease = self.min_impurity_decrease, min_impurity_split = self.min_impurity_split, bootstrap = self.bootstrap, oob_score = self.oob_score, n_jobs = self.n_jobs, random_state = self.random_state, verbose = self.verbose, warm_start = self.warm_start, class_weight = self.class_weight) if self.normalization == True: X = preprocessing.scale(X) clf.fit(X, y) # 透過 joblib 存 model joblib.dump(clf, self.model_name + '.pkl') print("RFC Save Model Finished") # 儲存參數、準確性 parameters = {} parameters['parameters'] = [] parameters['parameters'].append({ 'n_estimators': self.n_estimators, 'criterion': self.criterion, 'max_features': self.max_features, }) parameters['scoring'] = [] parameters['scoring'].append({ 'valid_score': self.score }) parameters['preprocessing'] = [] parameters['preprocessing'].append({ 'normalization': self.normalization }) with open(self.model_name + '_parameters', 'w', encoding = "utf-8") as rfcf: json.dump(parameters, rfcf) print("RFC Save Parameters Finished") if __name__ == '__main__': X, y = load_wine().data, load_wine().target name = 'wine' rfc = RFC(name) rfc.tuning_parameters(X, y) rfc.train(X, y) # 載入參數並顯示出來 with open(rfc.model_name + '_parameters') as json_file: data = json.load(json_file) for p in data['parameters']: print('n_estimators: ' + str(p['n_estimators'])) print('criterion: ' + p['criterion']) print('max_features: ' + p['max_features']) # 不同的評分標準 key 要做更改 for s in data['scoring']: print('valid_score: ' + str(s['valid_score'])) for p in data['preprocessing']: print('normalization: ' + str(p['normalization'])) normalization = p['normalization'] # 載入 model 並去預測 if normalization == True: X = preprocessing.scale(X) rfc = joblib.load(rfc.model_name + '.pkl') 
print(rfc.score(X, y))
Alex-CHUN-YU/Recommender-System
scenario_algorithm_analysis/rfc.py
rfc.py
py
9,077
python
en
code
0
github-code
6
28713863068
import torch import pandas as pd import os from shutil import copy from utils import fix_randomness, save_to_df, _logger, report_results, get_nonexistant_path, copy_Files from dataloader.dataloader import data_generator from trainer.training_evaluation import cross_domain_test from datetime import datetime from itertools import product from args import args import wandb start_time = datetime.now() device = torch.device(args.device) da_method = args.da_method save_dir = args.save_dir data_type = args.selected_dataset data_path = f"./data/{data_type}" base_model_type = args.base_model experiment_description = args.experiment_description if not os.path.exists(save_dir): os.mkdir(save_dir) exec(f'from trainer.{da_method} import cross_domain_train') exec(f'from config_files.{data_type}_Configs import Config as Configs') exec(f'from models.models import {base_model_type} as base_model') configs = Configs() # os.environ["WANDB_MODE"] = "dryrun" os.environ["WANDB_SILENT"] = 'true' os.environ['CUDA_LAUNCH_BLOCKING'] = "1" # torch.backends.cudnn.enabled = False # another solution for lstm lunch faiulure issue def main_train_cd(): # find out the domains IDs data_files = os.listdir(data_path) data_files = [i for i in data_files if "train" in i] sources = [i[6] for i in data_files] src_tgt_product = [sources, sources] simple_column_names = ['Run ID', 'source_loss', 'source_acc', 'target_loss', 'target_acc',] column_names_mean = ['Scenario', 'Source_only_loss_mean', 'Source_only_acc_mean', f'{da_method}_loss_mean', f'{da_method}_acc_mean', f'Source_only_loss_std', 'Source_only_acc_std', f'{da_method}_loss_std', f'{da_method}_acc_std'] simple_df= pd.DataFrame(columns=simple_column_names) mean_df = pd.DataFrame(columns=column_names_mean) # Logging # cwd = os.getcwd() # exp_log_dir = os.path.join(r"D:\Autoregressive Domain Adaptation for Time series data\Last",save_dir, experiment_description, f"{da_method}_{data_type}_{args.run_description}") exp_log_dir = os.path.join(os.getcwd(),save_dir, experiment_description, f"{da_method}_{data_type}_{args.run_description}") exp_log_dir = get_nonexistant_path(exp_log_dir) # os.makedirs(exp_log_dir, exist_ok=True) # copy(f"/home/mohamed/SLARADA/config_files/{data_type}_configs.py", f"{exp_log_dir}/{data_type}_configs.py") # copy(f"/home/mohamed/SLARADA/trainer/{da_method}.py", f"{exp_log_dir}/{da_method}_script.py") # copy("/home/mohamed/SLARADA/args.py", f"{exp_log_dir}/args.py") copy_Files(exp_log_dir, data_type, da_method) # loop through domains # loop through domains counter = 0 src_counter = 0 for src_id, tgt_id in product(*src_tgt_product): # for src_id in ['a', 'b', 'c']: # for tgt_id in ['a', 'b','c']: if src_id != tgt_id: # prepare save directory # specify number of consecutive runs for run_id in range(args.num_runs): fix_randomness(run_id) # Logging log_dir = os.path.join(exp_log_dir, src_id + "_to_" + tgt_id + "_run_"+ str(run_id)) os.makedirs(log_dir, exist_ok=True) log_file_name = os.path.join(log_dir, f"logs_{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log") logger = _logger(log_file_name) logger.debug("=" * 45) logger.debug(f'Dataset: {data_type}') logger.debug(f'Method: {da_method}') logger.debug("=" * 45) logger.debug(f'Source: {src_id} ---> Target: {tgt_id}') logger.debug(f'Run ID: {run_id}') logger.debug("=" * 45) # Load datasets src_train_dl, src_valid_dl, src_test_dl = data_generator(data_path, src_id, configs) tgt_train_dl, tgt_valid_dl, tgt_test_dl = data_generator(data_path, tgt_id, configs) if args.tensorboard: wandb.init(project="SLARDA", 
group = f'{da_method}_{data_type}', name=f'{src_id}_to_{tgt_id}_run_{run_id}', config=configs, sync_tensorboard=False, reinit=True, dir=r"./visualize/", ) source_model, target_model = cross_domain_train(src_train_dl, src_valid_dl, src_test_dl, tgt_train_dl, tgt_valid_dl, base_model, src_id, tgt_id, device, logger, configs) scores = cross_domain_test(source_model, target_model, src_id, tgt_id, src_train_dl, tgt_train_dl, src_test_dl, tgt_test_dl, device, log_dir, logger) run_name = f"domain_{src_id}_run_{run_id}" outs = (run_name,) + scores simple_df.loc[counter] = outs counter += 1 input_data = [f"{src_id}-->{tgt_id}"] input_data.extend(simple_df.iloc[-args.num_runs:, 1:].mean().array) input_data.extend(simple_df.iloc[-args.num_runs:, 1:].std().array) mean_df.loc[src_counter] = input_data src_counter += 1 # Printing and saving final results print(simple_df.to_string()) print(mean_df.to_string()) printed_results = mean_df[['Scenario', 'Source_only_acc_mean', 'Source_only_acc_std', f'{da_method}_acc_mean', f'{da_method}_acc_std']] mean = mean_df[['Source_only_acc_mean', 'Source_only_acc_std', f'{da_method}_acc_mean', f'{da_method}_acc_std']].mean() printed_results.loc[len(printed_results)] = mean printed_results.at[len(printed_results)-1, 'Scenario'] = 'Average' logger.debug(f"Total training time is {datetime.now() - start_time}") logger.debug('=' * 45) logger.debug(f'Results using: {da_method}') logger.debug('=' * 45) logger.debug(mean_df.to_string()) logger.debug(printed_results.to_string()) print_res_name = os.path.basename(exp_log_dir) simple_df.to_excel(f'{exp_log_dir}/full_res_results_{print_res_name}.xlsx') printed_results.to_excel(f'{exp_log_dir}/printed_results_{print_res_name}.xlsx') if args.tensorboard: wandb.log({"Full_results": wandb.Table(dataframe=simple_df)}) wandb.log({"Printed_results": wandb.Table(dataframe=printed_results)}) if __name__ == "__main__": wandb.config = configs main_train_cd()
mohamedr002/SLARDA
Autorgressive_Adaptation/train_CD.py
train_CD.py
py
6,844
python
en
code
23
github-code
6
39763998514
import streamlit as st
import os
from PIL import Image
from ultralytics import YOLO
import re

# Load the model
model = YOLO("model.pt")

# Set the path for results
output_dir = 'temp_out_res'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)


# Function to predict images
def predict_image(image_path):
    results = model.predict(source=image_path)
    input_filename = os.path.basename(results[0].path)
    annotated_img = results[0].plot()
    pil_image = Image.fromarray(annotated_img[..., ::-1])
    pil_image.save(os.path.join(output_dir, input_filename))

    total_polis = 0
    total_monos = 0
    polis_index = 0
    monos_index = 1

    verbose_output = results[0].verbose()
    polis_match = re.search(r'(\d+) poli', verbose_output)
    monos_match = re.search(r'(\d+) mono', verbose_output)
    if polis_match:
        total_polis += int(polis_match.group(1))
    if monos_match:
        total_monos += int(monos_match.group(1))

    if total_polis + total_monos == 0:
        polis_percentage = 0
    else:
        polis_percentage = (total_polis / (total_polis + total_monos)) * 100

    return os.path.join(output_dir, input_filename), total_polis, total_monos, polis_percentage


# Main Streamlit function
def main():
    st.title("EndoScan: YOLO Subclinical Endometritis Detector")
    uploaded_file = st.file_uploader("Choose an image for prediction", type=['jpg', 'jpeg', 'png'])
    if uploaded_file is not None:
        image_path = os.path.join(output_dir, uploaded_file.name)
        with open(image_path, 'wb') as f:
            f.write(uploaded_file.getbuffer())
        st.image(image_path, caption='Uploaded image.', use_column_width=True)
        if st.button("Predict"):
            pred_img_path, polis_count, monos_count, polis_perc = predict_image(image_path)
            st.image(pred_img_path, caption='Predicted image.', use_column_width=True)
            st.write(f"Total count of polymorphonuclear cells: {polis_count}")
            st.write(f"Total count of mononuclear cells: {monos_count}")
            st.write(f"Percentage of polymorphonuclear cells: {polis_perc:.2f}%")


if __name__ == '__main__':
    main()
DawidTobolski/YOLO_cell
YOLO_cell.py
YOLO_cell.py
py
2,252
python
en
code
0
github-code
6
8588345616
from collections import namedtuple
from datetime import datetime
from time import sleep
from timeit import default_timer as timer
import re

import requests


def _request_matches(r, regexp) -> bool:
    """Check if the request has data and that data matches a given regular expression.

    Args:
        r: HTTP call result from a status provider, must implement raise_for_status() and .text
        regexp: Compiled regular expression to search for in the HTTP request text field.

    Returns:
        True if a match is found, False if not, and None if the request contains no .text property.
    """
    try:
        r.raise_for_status()
        text = r.text
        return regexp.search(text) is not None
    except:
        return None


def check_status(url: str, regexp, status_provider, src='localhost') -> dict:
    """Check the status code of a given URL.

    Args:
        url: URL-string of a resource to check with an HTTP GET request.
        regexp: Regular expression to check the response against, if any.
        status_provider: Callable used to get the status of a resource.
        src: Identifier of the requestor, used for reporting and returned as result.src.

    Returns:
        Object representing the status of the given resource.
    """
    ts = datetime.now()
    start_time = timer()
    r = status_provider(url)
    end_time = timer()

    return {
        'timestamp': str(ts),
        'src': src,
        'target': url,
        'time': (end_time - start_time),
        'code': r.status_code,
        'has_match': _request_matches(r, regexp) if regexp else None
    }


class RestStatusPoller:
    """A source of REST-resource status checks.

    This source issues REST GET requests to a given resource URL and yields a dict
    describing the resource status. The source is designed to be used as an iterable:
        for data in source:
            process(data)

    Keyword Arguments:
        url: URL of the resource to check status.
        interval (int or None): time in sec to wait before the next check. If None is given,
            the check is performed only once.
        regexp (str or None): regular expression to search for in the response body, if any.
            If None is given - no search is performed and the 'has_match' field of the status
            response is set to None.
        provider (callable or None): a resource status provider override. If None is given -
            requests.get is used. Default is None.
    """

    def __init__(self, url, interval, regexp, provider=None):
        self.url = url
        self.interval = interval
        self.pattern = re.compile(regexp) if regexp else None
        self.__value_provide = provider or (lambda x: requests.get(
            x, headers={'content-type': 'application/json'}))
        self.__iter_count = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.__iter_count > 0:
            if self.interval is not None:
                sleep(self.interval)
            else:
                raise StopIteration()
        self.__iter_count += 1
        return check_status(self.url, self.pattern, self.__value_provide)
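A usage sketch for the poller. To keep it self-contained it injects a stubbed provider through the constructor's existing provider hook, so no real HTTP call is made; the class and URL below are illustrative.

class _FakeResponse:
    status_code = 200
    text = "service is healthy"
    def raise_for_status(self):
        pass

poller = RestStatusPoller("http://example.com/health",
                          interval=None,                      # single check, then StopIteration
                          regexp=r"healthy",
                          provider=lambda url: _FakeResponse())
for status in poller:
    print(status['code'], status['has_match'])                # 200 True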
abbyssoul/site_check
site_checker/rest_source.py
rest_source.py
py
3,130
python
en
code
0
github-code
6
74285386747
import sys sys.path.append('../python') sys.path.append('../apps') import needle as ndl from d2l import torch as d2l import torch import torch.nn as nn import numpy as np class MultiHeadAttention(nn.Module): """多头注意力""" def __init__(self, key_size, query_size, value_size, num_hiddens, num_heads, dropout, bias=False, **kwargs): super(MultiHeadAttention, self).__init__(**kwargs) self.num_heads = num_heads self.attention = d2l.DotProductAttention(dropout) self.W_q = nn.Linear(query_size, num_hiddens, bias=bias) self.W_k = nn.Linear(key_size, num_hiddens, bias=bias) self.W_v = nn.Linear(value_size, num_hiddens, bias=bias) self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias) torch.nn.init.kaiming_uniform_(self.W_q.weight) torch.nn.init.kaiming_uniform_(self.W_k.weight) torch.nn.init.kaiming_uniform_(self.W_v.weight) torch.nn.init.kaiming_uniform_(self.W_o.weight) ### test self.X1 = None self.X2 = None self.X3 = None self.output = None self.vl = None def forward(self, queries, keys, values, valid_lens): # queries,keys,values的形状: # (batch_size,查询或者“键-值”对的个数,num_hiddens) # valid_lens 的形状: # (batch_size,)或(batch_size,查询的个数) # 经过变换后,输出的queries,keys,values 的形状: # (batch_size*num_heads,查询或者“键-值”对的个数, # num_hiddens/num_heads) queries = self.transpose_qkv(self.W_q(queries), self.num_heads) keys = self.transpose_qkv(self.W_k(keys), self.num_heads) values = self.transpose_qkv(self.W_v(values), self.num_heads) if valid_lens is not None: # 在轴0,将第一项(标量或者矢量)复制num_heads次, # 然后如此复制第二项,然后诸如此类。 valid_lens = torch.repeat_interleave( valid_lens, repeats=self.num_heads, dim=0) self.vl = valid_lens # output的形状:(batch_size*num_heads,查询的个数, # num_hiddens/num_heads) output = self.attention(queries, keys, values, valid_lens) self.output = output # output_concat的形状:(batch_size,查询的个数,num_hiddens) output_concat = self.transpose_output(output, self.num_heads) return self.W_o(output_concat) def transpose_qkv(self, X, num_heads): """为了多注意力头的并行计算而变换形状""" # 输入X的形状:(batch_size,查询或者“键-值”对的个数,num_hiddens) # 输出X的形状:(batch_size,查询或者“键-值”对的个数,num_heads, # num_hiddens/num_heads) X = X.reshape(X.shape[0], X.shape[1], num_heads, -1) self.X1 = X.detach().numpy() # 输出X的形状:(batch_size,num_heads,查询或者“键-值”对的个数, # num_hiddens/num_heads) X = X.permute(0, 2, 1, 3) self.X2 = X.detach().numpy() # 最终输出的形状:(batch_size*num_heads,查询或者“键-值”对的个数, # num_hiddens/num_heads) X3 = X.reshape(-1, X.shape[2], X.shape[3]) self.X3 = X3.detach().numpy() return X3 def transpose_output(self, X, num_heads): """逆转transpose_qkv函数的操作""" X = X.reshape(-1, num_heads, X.shape[1], X.shape[2]) X = X.permute(0, 2, 1, 3) return X.reshape(X.shape[0], X.shape[1], -1) num_hiddens, num_heads = 100, 5 batch_size, num_queries = 2, 4 num_kvpairs = 6 valid_lens = torch.tensor([3, 2]) # valid_lens = None X = torch.randn((batch_size, num_queries, num_hiddens),dtype=torch.float32) Y = torch.randn((batch_size, num_kvpairs, num_hiddens),dtype=torch.float32) # d2l.check_shape(attention(X, Y, Y, valid_lens), # (batch_size, num_queries, num_hiddens)) dropout = 0 attention_ = ndl.nn.MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens, num_hiddens, num_heads, dropout, device=ndl.cpu(), dtype="float32") valid_lens_ = valid_lens.detach().numpy() if valid_lens is not None else None X_ = ndl.Tensor(X.detach().numpy(), device=ndl.cpu(), dtype="float32") Y_ = ndl.Tensor(Y.detach().numpy(), device=ndl.cpu(), dtype="float32") attention = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens, num_hiddens, num_heads, dropout) attention.W_q.weight = 
torch.nn.Parameter(torch.tensor(attention_.W_q.weight.numpy().T, dtype=torch.float32)) attention.W_k.weight = torch.nn.Parameter(torch.tensor(attention_.W_k.weight.numpy().T, dtype=torch.float32)) attention.W_v.weight = torch.nn.Parameter(torch.tensor(attention_.W_v.weight.numpy().T, dtype=torch.float32)) attention.W_o.weight = torch.nn.Parameter(torch.tensor(attention_.W_o.weight.numpy().T, dtype=torch.float32)) print("W_q.weight:", np.linalg.norm(attention.W_q.weight.T.detach().numpy()-attention_.W_q.weight.numpy())) print("W_k.weight:", np.linalg.norm(attention.W_k.weight.T.detach().numpy()-attention_.W_k.weight.numpy())) print("W_v.weight:", np.linalg.norm(attention.W_v.weight.T.detach().numpy()-attention_.W_v.weight.numpy())) print("W_o.weight:", np.linalg.norm(attention.W_o.weight.T.detach().numpy()-attention_.W_o.weight.numpy())) print("X:", np.linalg.norm(X.detach().numpy()-X_.numpy())) queries = attention.transpose_qkv(attention.W_q(X), attention.num_heads) queries_ = attention_.transpose_qkv(attention_.W_q(X_)) zq = attention.W_q(X).detach().numpy() zq_ = attention_.W_q(X_).numpy() print("W_q.weight:", np.linalg.norm(attention.W_q.weight.T.detach().numpy() - attention_.W_q.weight.numpy())) print("W_q(X):", np.linalg.norm(zq - zq_)) X1 = X.reshape((X.shape[0], X.shape[1], attention.num_heads, -1)) X1_ = X_.reshape((X_.shape[0], X_.shape[1], attention_.num_heads, -1)) print("X1-X1_:", np.linalg.norm(X1.detach().numpy() - X1_.numpy())) # print("X1.shape", attention.X1.shape) # print("X1_.shape", attention_.X1.shape) # print("X2.shape", attention.X2.shape) # print("X2_.shape", attention_.X2.shape) # print("X3.shape", attention.X3.shape) # print("X3_.shape", attention_.X3.shape) # print("X1:", np.linalg.norm(attention.X1-attention_.X1)) # print("X2:", np.linalg.norm(attention.X2-attention_.X2)) # print("X3:", np.linalg.norm(attention.X3-attention_.X3)) keys = attention.transpose_qkv(attention.W_k(Y), attention.num_heads) keys_ = attention_.transpose_qkv(attention_.W_k(Y_)) # print("X1:", np.linalg.norm(attention.X1-attention_.X1)) # print("X2:", np.linalg.norm(attention.X2-attention_.X2)) # print("X3:", np.linalg.norm(attention.X3-attention_.X3)) values = attention.transpose_qkv(attention.W_v(Y), attention.num_heads) values_ = attention_.transpose_qkv(attention_.W_v(Y_)) # print("X1:", np.linalg.norm(attention.X1-attention_.X1)) # print("X2:", np.linalg.norm(attention.X2-attention_.X2)) # print("X3:", np.linalg.norm(attention.X3-attention_.X3)) print(np.linalg.norm(X.detach().numpy()-X_.numpy())) print(np.linalg.norm(Y.detach().numpy()-Y_.numpy())) print(np.linalg.norm(queries.detach().numpy()-queries_.numpy())) print(np.linalg.norm(keys.detach().numpy()-keys_.numpy())) print(np.linalg.norm(values.detach().numpy()-values_.numpy())) attention.eval() y = attention(X, Y, Y, valid_lens) print("attn_output.shape:", y.shape) y_ = attention_(X_, Y_, Y_, valid_lens_) print("attn_output_.shape:", y_.shape) if (valid_lens is not None): print("valid_lens:", np.linalg.norm(attention.vl.detach().numpy()-attention_.vl)) print("output:", np.linalg.norm(attention.output.detach().numpy()-attention_.output.numpy())) print("attn_output:", np.linalg.norm(y.detach().numpy()-y_.numpy()))
Erostrate9/needle
tests/MultiHeadAttention.py
MultiHeadAttention.py
py
7,843
python
en
code
2
github-code
6
26287041907
import sys import matplotlib.pyplot as plt import numpy as np import os # this program reads input from a script which has assessed how networks react to a particular combination of gradient and division status # the script has produced for each network a matrix with 0 (migrate) and 1 (divide), which this program will plot and find the consensus for. if len(sys.argv) <3: print ("This is the program 'plot_netanalysis_jan.py'") print ("Usage: ./plot_netanalysis_jan.py <output_file> <plot individuals?> <input filenames>") sys.exit(1) else: outputfile=sys.argv[1] indiplot=int(sys.argv[2]) arraystorage=[] filestorage=[] init=0 count=0 sizes=None consensus=None for filename in sys.argv[3:]: #print ("{}".format(filename)) divmig = np.loadtxt(filename, dtype='i', delimiter='\t') #print sizes if not init: sizes = np.shape(divmig[1:,1:]) consensus=np.zeros((sizes[0]*sizes[1],),dtype=int) init=1 outfile=os.path.splitext(filename)[0] #for if you still need to plot the individuals: if (indiplot): fig=plt.figure() #! fig.set_size_inches(1, 1.*sizes[0]/sizes[1], forward = False) #! ax = plt.Axes(fig, [0., 0., 1., 1.]) #! ax.set_axis_off() #! fig.add_axes(ax) #! ax.imshow(divmig[1:,1:], cmap='RdYlBu', origin='lower') divshare=divmig[1:,1:].sum() migshare=(sizes[0])*(sizes[1])-divshare migs="%04d" % (migshare,) #print divs plt.savefig("div_"+str(migs)+"_"+outfile+".pdf", dpi=sizes[1]) #bbox_inches='tight' plt.close() binarystring=divmig[1:,1:].flatten() consensus=np.add(binarystring, consensus) #print ("{}".format(consensus)) arraystorage.append(binarystring) filestorage.append(outfile) count+=1 #find the consensus sequence bool_consensus= consensus > count/2 print ("{}".format(bool_consensus)) consensus_sequence=bool_consensus.astype(int) print ("consensus is {}".format(consensus_sequence)) wfilename="consensussequence_"+outputfile+".dat" writefile=open(wfilename,"w") for el in consensus_sequence: writefile.write(str(el)+" ") writefile.close() #display consensus image imcons=np.reshape(consensus_sequence,sizes) fig=plt.figure() #! fig.set_size_inches(1, 1.*sizes[0]/sizes[1], forward = False) #! ax = plt.Axes(fig, [0., 0., 1., 1.]) #! ax.set_axis_off() #! fig.add_axes(ax) #! ax.imshow(imcons, cmap='RdYlBu', origin='lower') #outfile=os.path.splitext(outputfile)[0] plt.savefig("consensus"+"_"+outputfile+".pdf", dpi=sizes[1]) #bbox_inches='tight' plt.close() #find for each individual the distance to the consensus sequence #writefile=open(outputfile, "w") #fig=plt.figure() # #hamms=[] minhamm=999999999 for fi,seq in zip(filestorage, arraystorage): hamm=np.count_nonzero(seq!=consensus_sequence) if hamm<minhamm: minhamm=hamm minfile=fi print ("file with individual closest to consensus: {}".format(minfile)) # hamms.append[hamm] #writefile.write(fi+"\t"+str(hamm)+"\n") #maxbina=max(hamms) #hista, bin_edgesa = np.histogram(hamms, bins = range(maxbina)) #plt.plot(bin_edgesa[:-1],hista) #writefile.close()
RenskeVroomans/regulation_evolution
scripts/plot_netanalysis_jan.py
plot_netanalysis_jan.py
py
3,203
python
en
code
0
github-code
6
25225150959
import time import random class NPC(): def __init__(self, trigger_item, speech = "", name = ""): self.name = name self.trigger_item = trigger_item self.speech = speech self.health = 20 def deliver_speech(self): print("\nThe patient runs towards you intent on attacking you") time.sleep(2) print("but when they get closer, they recognise an item you have.") time.sleep(2) print("\nIt used to belong to them. Remembering themselves, they tell you:") print(self.speech) def attack(self): print("\nThe patient runs towards you intent on attacking you") #10% of the time 40-60 damage #20% of the time 30 - 40 #30 % of the time 20 - 30 #40 % of the time 10 - 20 if self.health <= 5: print("but they're too injured") return 0 else: damage_dealt = random.randint(10,50) return damage_dealt def decide_action(self, player_inv): if self.trigger_item in player_inv: self.deliver_speech() return 0 else: damage = self.attack() return damage
marivielle/KFC
NPC.py
NPC.py
py
1,239
python
en
code
0
github-code
6
5518662883
#!/usr/bin/python3 import sys, getopt #Replace version number in html files def replace_version (current_version, new_version): #Files where version number will be replaced files = ['index.html', 'article.html', './write/index.html'] #Goes through the array replacing the version in each file for file_name in files: with open (file_name) as f: newFileContent = f.read ().replace (current_version, new_version) with open (file_name, 'w') as f: f.write (newFileContent) #Parses and validates command line arguments def parse_arguments (argv): from_version = '' to_version = '' try: opts, args = getopt.getopt (sys.argv[1:], '',['from=','to=']) except getopt.GetoptError: print ('Usage: new_version.py --from <current_version> --to <new_version>') sys.exit () for opt, arg in opts: if opt == '--help': print ('Usage: new_version.py --from <current_version> --to <new_version>') sys.exit () elif opt in ('--from'): from_version = arg elif opt in ('--to'): to_version = arg else: print ('Usage: new_version.py --from <current_version> --to <new_version>') sys.exit () if (from_version == '' or to_version == ''): print ('Usage: new_version.py --from <current_version> --to <new_version>') sys.exit () else: #Returns parsed arguments return (from_version, to_version) # Parses command line arguments --from and --to so executes the if __name__ == '__main__': from_version, to_version = parse_arguments (sys.argv) replace_version (from_version, to_version);
willgcr/mblog
new_version.py
new_version.py
py
1,521
python
en
code
2
github-code
6
30728277710
import fileinput from typing import Counter ll = [l.strip() for l in fileinput.input()] numbers = [] for line_nr in range(len(ll)): l = ll[line_nr] numbers = [int(x) for x in l.split(',')] def count_fishes(days): dd = Counter(numbers) for _ in range(days): new_fishes = dd[0] for i in range(0, 8): dd[i] = dd[i+1] dd[6] += new_fishes dd[8] = new_fishes return sum([dd[i] for i in dd]) print(count_fishes(80), count_fishes(256))
mdaw323/alg
adventofcode2021/6.py
6.py
py
498
python
en
code
0
github-code
6
3848748609
from setuptools import setup, Extension condor_module = Extension('condor', sources=['c/condor.c', 'c/glutils.c'], libraries=['GLEW', 'glfw']) setup (name='Condor', version='0.1', description='', ext_modules=[condor_module])
enricozb/Condor
condor/setup.py
setup.py
py
301
python
en
code
0
github-code
6
39685754485
with open('input.txt', 'r') as f: priorities = 0 for line in f: l = len(line)//2 s1, s2 = line[l:-1], line[:l] for c in s1: if c in s2: priorities += 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'.index(c) + 1 break print(priorities)
SalmonA/adventofcode
2022/03/03_1.py
03_1.py
py
320
python
en
code
0
github-code
6
19411076487
def create_offering(newOffering): classTimesArray = [] if newOffering.classTimes: for classTime in newOffering.classTimes: classTime = { u'location': classTime.location, u'startTime': classTime.startTime, u'endTime': classTime.endTime, u'sunday': classTime.sunday, u'monday': classTime.monday, u'tuesday': classTime.tuesday, u'wednesday': classTime.wednesday, u'thursday': classTime.thursday, u'friday': classTime.friday, u'saturday': classTime.saturday } classTimesArray.append(classTime) extrasDict = { u'Attributes': newOffering.attributes, u'Levels':newOffering.levels, u'Total Seats': newOffering.totalSeats, u'Taken Seats': newOffering.takenSeats, u'Total Waitlist Seats': newOffering.totalWaitlistSeats, u'Taken Waitlist Seats': newOffering.takenWaitlistSeats } return { u'sectionNumber': newOffering.sectionNumber, u'status': newOffering.status, u'id': newOffering.id, u'instructors': newOffering.instructors, u'classTimes': classTimesArray, u'extras': extrasDict } class Offering: status = None levels = None id = None departmentName = None departmentAcronym = None departmentNumberString = None departmentNumber = None sectionNumber = None name = None credit = None classTimes = None startDate = None endDate = None comment = None attributes = None booksLink = None bulletinLink = None description = None instructors = None totalSeats = None takenSeats = None totalWaitlistSeats = None takenWaitlistSeats = None class ClassTime: location = None startTime = None endTime = None sunday = False monday = False tuesday = False wednesday = False thursday = False friday = False saturday = False import requests from datetime import datetime import pytz from pytz import timezone eastern = timezone('EST') import re from bs4 import BeautifulSoup import firebase_admin from firebase_admin import credentials from firebase_admin import firestore import google.cloud.exceptions import urllib print ("-------- EMERSON COURSE SCRAPE ----------") cred = credentials.Certificate('./credentials.json') firebase_admin.initialize_app(cred) # db = firestore.client() # Make request and load offerings data = {'begin_ap':'a','begin_hh':'0','begin_mi':'0','end_ap':'a','end_hh':'0','end_mi':'0', 'sel_attr':['dummy','%'],'sel_camp':['dummy','%'],'sel_crse':'','sel_day':'dummy','sel_from_cred':'', 'sel_insm':'dummy','sel_instr':['dummy','%'],'sel_levl':['dummy','%'],'sel_ptrm':['dummy','%'], 'sel_schd':['dummy','%'],'sel_sess':'dummy','sel_subj':['dummy','BC','MB','CM','CD','CC','DA','DD','EC', 'EXT','FL','LF','HI','HS','IN','JR','LI','MK','MT','MU','PA','PH','PL','PF','PDE','CE','PS','PB','RL', 'SOC','SA','SC','SW','SO','LS','TH','VM','WDC','WR'],'sel_title':'','sel_to_cred':'','term_in':'201910'} url = "https://ssb.emerson.edu/PURPLE/bwckschd.p_get_crse_unsec" # get departments and instructors first print("Fetching homepage...") dataHomepage = dict(data) dataHomepage['sel_subj'] = 'dummy' r = requests.post(url, data=dataHomepage) soup = BeautifulSoup(r.content, "html.parser") unlistedDepts = { "Bsns of Creative Enterprises": "BC", "Civic Media": "CM", "External Program Course": "EXT VAL", "Prof Development Experience":"PDE", "School of Communication":"SOC", "Washington Program":"DC" } print("Page fetched. 
Uploading departments...") departments = soup.find('td', class_='dedefault').find_all('option') departmentsArray = [] for department in departments: info = department.text.split("(") if len(info)>1: deptDict = { u'departmentAcronym':re.sub('[^A-Z]','', info[1].strip()), u'departmentName':info[0].strip() } else: deptDict = { u'departmentAcronym':unicode(unlistedDepts[info[0].strip()]), u'departmentName':info[0].strip() } departmentsArray.append(deptDict) doc_ref = db.collection(u'schools/emerson/lists').document('departments') doc_ref.set({u'list':departmentsArray}) print("Departments uploaded. Uploading instructors...") instructors = soup.find('select', attrs={"name": "sel_instr"}).find_all('option') instructorsArray = [] for p in range(1,len(instructors)): instructor = re.sub(' +', ' ',instructors[p].text.strip()) if not instructor in instructorsArray: instructorsArray.append(instructor) doc_ref = db.collection(u'schools/emerson/lists').document('instructors') doc_ref.set({u'list':instructorsArray}) print("Instructors uploaded. Uploading courses. Fetching all courses on one page...") # Long, full networking request r = requests.post(url, data=data) print("Page fetched. Parsing and uploading...") soup = BeautifulSoup(r.content,"html.parser") # Speedier file test # file = urllib.urlopen("file:///Users/timtraversy/Google Drive//Development/Course Gnome/code/GWU-Scrape-Python/test.html") # soup = BeautifulSoup(file,"html.parser") offering_table = soup.find('table', class_='datadisplaytable') offerings = offering_table.find_all('tr', recursive=False) courseArray = [] # Loop over offerings two at a time to get both data pieces count = 0 for i in range(0,len(offerings),2): # Set up offering object newOffering = Offering() data = offerings[i].text.split(' - ') # Hack to account for class names that have a " - " offset = 0 if len(data) > 4: concatName = data[0].strip() for m in range(1, len(data)-3): concatName += " - " concatName += data[m].strip() offset += 1 newOffering.name = concatName else: newOffering.name = data[0].strip() if newOffering.name == 'Cancelled': continue newOffering.id = data[1+offset].strip() newOffering.departmentAcronym = data[2+offset].strip().split(' ')[0] if newOffering.departmentAcronym == "EXT": newOffering.departmentAcronym = unicode("EXT VAL") newOffering.departmentName = unicode("External Program Course") else: for dept in departmentsArray: if dept[u'departmentAcronym'] == newOffering.departmentAcronym: newOffering.departmentName = dept[u'departmentName'] newOffering.departmentNumber = data[2+offset].strip().split(' ')[1] newOffering.sectionNumber = data[3+offset].strip() # Get seat details + status url = "https://ssb.emerson.edu" + offerings[i].find('a')['href'] r = requests.post(url) detailSoup = BeautifulSoup(r.content,"html.parser") seats = detailSoup.find_all('td', class_="dddefault") # Seats newOffering.totalSeats = seats[1].text newOffering.takenSeats = seats[2].text # newOffering.totalWaitlistSeats = seats[4].text # newOffering.takenWaitlistSeats = seats[5].text # Status if newOffering.totalSeats > newOffering.takenSeats: newOffering.status = u'OPEN' elif newOffering.totalWaitlistSeats == '0': newOffering.status = u"CLOSED" else: newOffering.status = u"WAITLIST" # get levels and attributes data = offerings[i+1].find_all('span') for span in data: if span.text.strip() == 'Levels:': newOffering.levels = span.next_sibling.strip() elif span.text.strip() == 'Attributes:': newOffering.attributes = span.next_sibling.strip() # Credits catalog_entry = 
offerings[i+1].find('a') credits = catalog_entry.previous_sibling.previous_sibling.previous_sibling.strip() credits = re.sub('Credits','', credits).strip() credits = re.sub('\.0+','', credits).strip() credits = re.sub('OR','or', credits) credits = re.sub('TO','to', credits) credits = re.sub(' +',' ', credits) newOffering.credit = unicode(credits) # Description from catalog entry url = "https://ssb.emerson.edu" + catalog_entry['href'] r = requests.post(url) catalogSoup = BeautifulSoup(r.content,"html.parser") newOffering.description = catalogSoup.find('td', class_="ntdefault").text.split('\n')[1].strip() #Class Times instructors = [] classTimes=[] class_time_table = offerings[i+1].find('table',class_='datadisplaytable') if class_time_table: class_time_table = class_time_table.find_all('tr') for j in range(1,len(class_time_table)): newClassTime = ClassTime() details = class_time_table[j].find_all('td',class_='dddefault') for k in range (1,len(details)): text = details[k].text.strip() valid = True if k == 1: if text != 'TBA': times = text.split('-') newClassTime.startTime = eastern.localize(datetime.strptime(times[0].strip(), '%I:%M %p')) newClassTime.endTime = eastern.localize(datetime.strptime(times[1].strip(), '%I:%M %p')) else: valid = False break if k == 2: if 'U' in text: newClassTime.sunday = True if 'M' in text: newClassTime.monday = True if 'T' in text: newClassTime.tuesday = True if 'W' in text: newClassTime.wednesday = True if 'R' in text: newClassTime.thursday = True if 'F' in text: newClassTime.friday = True if 'S' in text: newClassTime.saturday = True if k == 3: # location newClassTime.location = text if k == 6: insts = re.sub('\([A-z]\)','', text).split(',') for inst in insts: if inst == "TBA": instructors = None break newInst = inst.strip() if not newInst in instructors: instructors.append(newInst) if valid: classTimes.append(newClassTime) if classTimes: newOffering.classTimes = classTimes if instructors: newOffering.instructors = instructors courseArray.append(newOffering) print('Parsed: {id}, Count:{len}'.format(id=unicode(newOffering.id), len=len(courseArray))) count = 0 for indx, course in enumerate(courseArray): offeringsArray = [create_offering(course)] index = indx + 1 while index < len(courseArray): courseTwo = courseArray[index] if (course.name == courseTwo.name and course.departmentNumber == courseTwo.departmentNumber and course.departmentAcronym == courseTwo.departmentAcronym): offeringsArray.append(create_offering(courseTwo)) del courseArray[index] else: index += 1 dictionary = { u'departmentName': course.departmentName, u'departmentAcronym': course.departmentAcronym, u'departmentNumber': course.departmentNumber, u'name': course.name, u'credit': course.credit, u'description': course.description, u'offerings': offeringsArray, } identifier = unicode(course.departmentAcronym + str(course.departmentNumber)) db.collection(u'schools/emerson/fall2018_courses').document(identifier).set(dictionary) count += 1 print('Uploaded ({count}/{total}): {id}'.format(count=count, total=len(courseArray), id=course.id)) # Updating version number doc_ref = db.collection(u'schools').document(u'emerson') try: doc = doc_ref.get() version = doc.to_dict()['version'] print(u'Updating from version {}'.format(doc.to_dict()['version'])) doc_ref.set({u'version':version + 1}) except google.cloud.exceptions.NotFound: print(u'No metadata, something is wrong.') exit(1) print ("----- EMERSON COURSE SCRAPE COMPLETE ------")
timtraversy/GWU-Scrape-Python
emerson-scrape.py
emerson-scrape.py
py
12,340
python
en
code
0
github-code
6
25022064101
# http://docs.python.org/library/htmlparser.html from html.parser import HTMLParser class MyHTMLParser(HTMLParser): def handle_starttag(self, tag, attrs): print("start tag: %s" % tag) def handle_endtag(self, tag): print("end tag: %s" % tag) def main(): page="<a color=black>poo</a>" page =""" <html> <head> <title>test</title> </head> <body> </body> </html> """ myparser = MyHTMLParser() myparser.feed(page) if __name__ == '__main__': main()
ahbaid/learn
python/scae/class-08/html1.py
html1.py
py
495
python
en
code
1
github-code
6
13658425408
import numpy as np import pandas as pd import xarray as xr import matplotlib.pyplot as plt def summarize_qc_resamples(input_df, verbose=False, **resample_kwargs): time_list = list() data_list = list() for time, df in input_df.resample(**resample_kwargs): if verbose == True: print("Currently working on: {}".format(time)) time_list.append(time) df_stats = df.qc.describe() data_list.append(df_stats.values) else: measures = df_stats.index.to_list() variables = df.columns.to_list() attrs = resample_kwargs return xr.DataArray(np.dstack(data_list), coords = [measures, variables, time_list], dims = ['measure','variable','time'], name = "qc_summary", attrs = attrs)
wangsen992/pyqc
src/pyqc/tools.py
tools.py
py
855
python
en
code
0
github-code
6
19504742337
from igraph import Graph from igraph import plot grafo = Graph(edges = [(0,1),(2,3),(0,2),(0,3)], directed = True) grafo.vs['label'] =['Fernando', 'Pedro', 'Jose', 'Antonio'] grafo.vs['nota'] = [100, 40, 60, 20] grafo.es['tipoAmizade'] = ['Amigo', 'Inimigo', 'Amigo'] grafo.es['devendo'] = [1,3,2,5] grafo.vs['color'] = ['red', 'yellow','orange', 'green'] plot(grafo, bbox =(300,300), vertex_size = grafo.vs['nota'], edge_width = grafo.es['devendo'], vertex_color = grafo.vs['color'], edge_curved = 0.4, vertex_shape = 'square')
guibarreta1993Average/data_science_udemy
05_Grafos/aula34_impressao.py
aula34_impressao.py
py
557
python
en
code
0
github-code
6
31148205537
import argparse import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import pandas as pd import numpy as np import json import os def parse_args(): parser = argparse.ArgumentParser(prog='') parser.add_argument('json', type=str, help='Figure1 JSON.') parser.add_argument('-o', '--output_dir', default='.', help='') args = parser.parse_args() return(args) def b(paths, outfile): dar_enrich = pd.read_csv(paths['figure6']['b']['dar_enrichment'], sep='\t') fp_enrich = pd.read_csv(paths['figure6']['b']['footprint_enrichment'], sep='\t') f, axes = plt.subplots(1,2, num='b', figsize=(12, 6)) fp_logp = fp_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x)) fp_logp = fp_logp.rename('footprint enrichments') dar_logp = dar_enrich['pval_enrichment'].map(lambda x: -1*np.log10(x)) dar_logp.sort_values(ascending=False, inplace=True) dar_logp = dar_logp.rename('top DAR enrichments') dar_logp = dar_logp[:10] sns.set_style("whitegrid") sns.kdeplot(dar_logp, shade=True, color="#E74C3C", ax=axes[0]) sns.kdeplot(fp_logp, shade=True, color="#3498DB", ax=axes[0]) axes[0].set_xlabel('-log10 pval', fontsize=15) def label_point(x, y, val, ax): a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1) for i, point in a.iterrows(): ax.text(point['x']+.02, point['y'], str(point['val']), fontsize=10) def rand_jitter(arr, c): stdev = c*(max(arr)-min(arr)) return arr + stdev fp_enrich['pval_enrichment'] = -1*np.log10(fp_enrich['pval_enrichment']) fp_enrich.sort_values('pval_enrichment', ascending=False, inplace=True) fp_enrich.reset_index(drop=True, inplace=True) sns.scatterplot(x=fp_enrich.index.tolist(), y='pval_enrichment', data=fp_enrich, ax=axes[1]) # label_point(pd.Series(fp_enrich.index.tolist()[:10]), fp_enrich['pval_enrichment'][:10], fp_enrich['name'][:10], axes[1]) axes[1].set_xticks='' f.savefig(outfile, dpi=300) def c(paths, outfile): fp_enrich = pd.read_csv(paths['figure6']['c'], sep='\t') hic_hit = fp_enrich[fp_enrich['name']=='ZNF416-Zf'] hic_df = pd.melt(hic_hit, id_vars=None, value_vars=['target_freq', 'bg_freq'], var_name='enrichment group', value_name='% total footprints') hic_df.sort_values('enrichment group', inplace=True) sns.set_style("whitegrid") f, axes = plt.subplots(1,1, num='c', figsize=(12, 12)) palette = ['#ABB2B9','#A569BD'] sns.barplot(x='enrichment group', y='% total footprints', data=hic_df, palette=palette, ax=axes) axes.set_xlabel('', fontsize=15) axes.set_xticks = '' axes.set_xticklabels([]) axes.set_ylabel('') f.savefig(outfile, dpi=300) def main(): args = parse_args() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) with open(args.json) as fp: paths = json.load(fp) bof = os.path.join(args.output_dir, 'Figure6b.png') cof = os.path.join(args.output_dir, 'Figure6c.png') b(paths, bof) c(paths, cof) if __name__ == '__main__': main()
perezja/Leukos
presentation/figure6/figure6.py
figure6.py
py
3,116
python
en
code
0
github-code
6
5657507234
import os from functools import reduce class Photo: id = None layout = None # v or h tags = [] def __init__(self, id, layout, tags): self.id = id self.layout = layout # self.tagalf = "".join(sorted(tags)) self.tagalf = tuple(sorted(tags)) self.tags = tags def __str__(self): return str(self.id) + " - " + " ".join(self.tags) class Slide: # 2 vertical or 1 horizontal photo_ids = [] tags = [] def __init__(self, photos): self.photo_ids = [str(photo.id) for photo in photos] self.tags = set(reduce(list.__add__, map(lambda x: list(x.tags), photos))) self.tags_sorted = tuple(sorted(list(self.tags))) def __str__(self): return " ".join([str(x) for x in self.photo_ids]) + " - " + " ".join([str(x) for x in self.tags]) class SlideShow: slides = [] def __init__(self, slides=None): self.slides = [] if slides is None else slides def calculate_score(self): if len(self.slides) == 0: return 0 score = 0 for i, slide in enumerate(self.slides): score += self.interest_factor(i) return score def interest_factor(self, i): if i + 1 >= len(self.slides): return 0 return interest_factor(self.slides[i], self.slides[i + 1]) def interest_factor(slide_1, slide_2): """ interest of slides Minimum between the number of common tags between Si and Si+1 the number of tags in Si but not in Si+1 the number of tags in Si+1 but not in Si """ common = set(slide_1.tags) & set(slide_2.tags) n_common = len(common) n_left = len(slide_1.tags) - len(set(slide_1.tags) & set(common)) n_right = len(slide_2.tags) - len(set(common) & set(slide_2.tags)) return min(n_common, n_left, n_right) def n_common_tags(slide_1, slide_2): # return len(set(slide_1.tags) & set(slide_2.tags)) return len(set(slide_1.tags).intersection(slide_2.tags)) def read_input(filepath): with open(filepath, 'r') as f: n = int(f.readline()) i = 0 result = [] while i < n: line = f.readline()[:-1].split(" ") result.append(Photo(i, line[0], line[2:])) i += 1 return result def write_output(slideshow, output_file): with open(output_file, "w") as f: f.write(str(len(slideshow.slides)) + "\n") for slide in slideshow.slides: f.write(' '.join(slide.photo_ids) + "\n") with open(output_file, 'rb+') as f: f.seek(-2, os.SEEK_END) f.truncate() def get_slideshow(photos): slideshow = SlideShow() vert = None slides = [] for photo in sorted(photos, key=lambda x: x.tagalf): if photo.layout == "H": slides.append(Slide([photo])) elif photo.layout == "V" and vert is None: vert = photo elif photo.layout == "V" and vert is not None: slides.append(Slide([photo, vert])) vert = None slides.sort(key=lambda x: x.tags_sorted) return SlideShow(slides) def main(): files = ['a_example.txt', 'b_lovely_landscapes.txt', 'c_memorable_moments.txt', 'd_pet_pictures.txt', 'e_shiny_selfies.txt'] sum_score = 0 for file in files: print(file) photos = read_input(file) slideshow = get_slideshow(photos) score = slideshow.calculate_score() sum_score += score print("SCORE: {}\n".format(score)) write_output(slideshow, "output/" + file) print("END, {}".format(sum_score)) return None if __name__ == "__main__": main()
phyx4/hashcode_2019
main.py
main.py
py
3,664
python
en
code
0
github-code
6
24931817284
from json import dumps, loads from State import State class Api: """ A class that provides methods for encoding and decoding States to and from JSON strings. Methods: - Encode(states: list[State]) -> str: Encodes a list of State objects to a JSON string. - Decode(jsonString: str) -> State: Decodes a JSON string to a State object. """ def Encode(states: list[State]) -> str: """ Encodes a list of State objects to a JSON string. Args: - states (list[State]): A list of State objects to encode. Returns: - str: A JSON string representing the list of State objects. """ return dumps([state.__dict__ for state in states]) def Decode(jsonString: str) -> State: """ Decodes a JSON string to a State object. Args: - jsonString (str): A JSON string to decode. Returns: - State: A State object representing the decoded JSON string. """ obj = loads(jsonString) return State( obj['Board'], obj['Direction'], (obj['EmptyPoint']['X'], obj['EmptyPoint']['Y']) )
Saeed-Ayman/8-puzzle
API.py
API.py
py
1,287
python
en
code
1
github-code
6
712141287
#! /usr/bin/env python3 # coding: utf-8 import os import logging as lg import pandas as pd import numpy as np lg.basicConfig(level=lg.DEBUG) import os import pandas as pd class SetOfParliamentMembers: def __init__(self, name): self.name = name def __repr__(self): return "setOfParliamentMember: {} members".format(len(self.dataframe)) def data_from_csv(self, csv_file): self.dataframe = pd.read_csv(csv_file, sep=";", engine = 'python') def data_from_dataframe(self, dataframe): self.dataframe = dataframe def display_chart(self): # à venir, patience ! pass def split_by_political_party(self): result = {} data = self.dataframe all_parties = data["parti_ratt_financier"].dropna().unique() for party in all_parties: data_subset = data[data.parti_ratt_financier == party] subset = SetOfParliamentMembers('MPs from party "{}"'.format(party)) subset.data_from_dataframe(data_subset) result[party] = subset return result def launch_analysis(data_file, by_party=False, info=False): sopm = SetOfParliamentMembers("All MPs") sopm.data_from_csv(os.path.join("data", data_file)) sopm.display_chart() if by_party: for party, s in sopm.split_by_political_party().items(): s.display_chart() if info: print(sopm) if __name__ == "__main__": launch_analysis("current_mps.csv")
honorezemagho/python-oc
analysis/csv.py
csv.py
py
1,496
python
en
code
0
github-code
6
7276876468
from django.db import models from django.contrib.auth.models import User class Animal(models.Model): """Класс описывает объект Животное""" owner = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="Владелец") species = models.CharField(max_length=30, verbose_name="Вид животного") name = models.CharField(max_length=30, verbose_name="Кличка") birth = models.DateField(verbose_name="Дата рождения") breed = models.CharField(max_length=30, verbose_name="Порода") gender = models.CharField( max_length=10, choices=[("Ж", "Женский"), ("М", "Мужской")], verbose_name="Пол" ) class Meta: verbose_name = "Животное" verbose_name_plural = "Животные" def __str__(self): return self.name class Vaccination(models.Model): """Класс описывающий объект Вакцинация""" animal = models.ForeignKey( Animal, on_delete=models.CASCADE, verbose_name="Животное" ) date = models.DateField(verbose_name="Дата прививки") vaccine = models.CharField(max_length=50, verbose_name="Вакцина") class Meta: verbose_name = "Вакцинация" verbose_name_plural = "Вакцинация" def __str__(self): return f"{self.date}" class Treatment(models.Model): """Класс описывающий объект Обратока от паразитов""" animal = models.ForeignKey( Animal, on_delete=models.CASCADE, verbose_name="Животное" ) parasite_type = models.CharField( max_length=10, choices=[("Гельминты", "Гельминты"), ("Клещи", "Клещи")], verbose_name="Вид паразитов", ) date = models.DateField(verbose_name="Дата обработки") medication = models.CharField(max_length=50, verbose_name="Препарат") dosage = models.CharField(max_length=10, verbose_name="Дозировка") class Meta: verbose_name = "Обработка от паразитов" verbose_name_plural = "Обработка от паразитов" def __str__(self): return f"{self.date}"
Gamilkar/animal_medical_record
main/models.py
models.py
py
2,320
python
ru
code
0
github-code
6
12510085973
from tqdm import tqdm import math import time import numpy as np def bingliu_mpqa(utterance_tokenized, file): feat_ = [] dict1_bing = {} for line in file: x = line.split("\t") dict1_bing[x[0] + "_" + x[1][:-1]] = 1 i=0 for tokens in utterance_tokenized: res = np.array([0,0,0,0]) for token in tokens: pos = (token + "_positive") neg = (token + "_negative") if (pos in dict1_bing): res[0]+=1 res[1]+=1 elif (neg in dict1_bing): res[1]-=1 if res[0]>0: res[2]=1 if tokens!=[]: pos = tokens[-1] + "_positive" neg = tokens[-1] + "_negative" if pos in dict1_bing: res[3]=1 elif neg in dict1_bing: res[3]=-1 feat_.append(res) return np.array(feat_) def SENT140(X): #sentiment140 dict1_S140 = {} with open("lexicons/3. Sentiment140-Lexicon-v0.1/unigrams-pmilexicon.txt", 'r') as fd: for line in fd: x = line.split(" ") dict1_S140[x[0]] = float(x[1]) feat_ = [] for tokens in X: sent140 = [0,0,0,0] cnt = 0 for token in tokens: if("#" not in token): cnt += 1 if(token in dict1_S140): sent140[0] += (dict1_S140[token] > 0) sent140[1] += dict1_S140[token] sent140[2] = max(sent140[2],dict1_S140[token]) if(len(tokens) >= 1 and tokens[-1] in dict1_S140): sent140[3] = (dict1_S140[tokens[-1]] > 0) feat_.append(sent140) return np.array(feat_) # print() def NRC_EMOTION(X): #NRC emotion dict1_NRC = {} cnt_r = 0 len1 = 0; with open("lexicons/6. NRC-10-expanded.csv", 'r') as fd: for line in fd: if(cnt_r == 0): cnt_r += 1 continue; x = line.split(" ") dict1_NRC[x[0]] = [float(i) for i in x[1:]] len1 = len(x[1:]) feat_ = [] for e,tokens in tqdm(enumerate(X)): emo_score = [[0,0,0,0] for i in range(len1)] cnt = 0 for token in tokens: if("#" in token): continue cnt += 1 if(token in dict1_NRC): for i,val in enumerate(dict1_NRC[token]): emo_score[i][0] += (val > 0) emo_score[i][1] += val emo_score[i][2] = max(emo_score[i][2],val) if(len(tokens) >= 1 and tokens[-1] in dict1_NRC): for i,val in enumerate(dict1_NRC[token]): emo_score[i][3] = (val > 0) res = [] for i in emo_score: res.extend(i) feat_.append(res) return np.array(feat_) # print() def NRC_HASHTAG_SENT(X): #NRC hashtag dict1_NRC = {} with open("lexicons/7. NRC-Hashtag-Sentiment-Lexicon-v0.1/unigrams-pmilexicon.txt", 'r') as fd: for line in fd: x = line.split(" ") dict1_NRC[x[0]] = float(x[1]) feat_ = [] for tokens in X: cnt = 0 f = [0,0,0,0] for token in tokens: if("#" not in token): continue cnt += 1 if(token in dict1_NRC): f[0] += (dict1_NRC[token] > 0) f[1] += dict1_NRC[token] f[2] = max(f[2],dict1_NRC[token]) if(len(tokens) >= 1 and tokens[-1] in dict1_NRC): f[3] = (dict1_NRC[tokens[-1]] > 0) feat_.append(f) return np.array(feat_) def lexicons(utterance_tokenized): filebingliu = open("lexicons/1. BingLiu.csv", "r") filempqa = open("lexicons/2. mpqa.txt", "r") start = time.time() bingliu = bingliu_mpqa(utterance_tokenized, filebingliu) mpqa = bingliu_mpqa(utterance_tokenized, filempqa) sent140 = SENT140(utterance_tokenized) nrcemotion = NRC_EMOTION(utterance_tokenized) nrchashtag = NRC_HASHTAG_SENT(utterance_tokenized) end = time.time() print("time to calculate lexicons: ", end-start) # y = len(bingliu[0]) + len([mpqa[0]]) + len(sent140[0]) + len(nrcemotion[0]) + len(nrchashtag[0]) feature = np.zeros([len(utterance_tokenized), 56]) for i in range(len(utterance_tokenized)): feature[i] = np.concatenate((bingliu[i], mpqa[i], sent140[i], nrcemotion[i], nrchashtag[i])) return feature if __name__=='__main__': lexicons(utterance_tokenized)
hamzah70/Multi_Modal_Emotion_Analysis
lexiconFeatureVector.py
lexiconFeatureVector.py
py
4,491
python
en
code
0
github-code
6
38353405555
import requests from bs4 import BeautifulSoup #screen-scraping library #request = requests.get("http://www.google.com") request = requests.get("https://www.johnlewis.com/house-by-john-lewis-curve-dining-chair-white/p231441579") content = request.content #getting content of the page soup = BeautifulSoup(content, "html.parser") element = soup.find("span",{"itemprop":"price","class":"now-price"}) #dictionary #print(element.text.strip()) string_price = element.text.strip() #"#£19.00" price_without_symbol = string_price[1:] price = (float(price_without_symbol)) if price < 50: print("You should buy the chair!") print("The current price is {}.".format(string_price)) else: print("Don't buy the chair!!") # <span itemprop="price" class="now-price"> £19.00 </span> #print(request.content)
BrayoKane/python-mongo
price-of-a-chair/src/app.py
app.py
py
811
python
en
code
0
github-code
6
74022415547
from rest_framework import status from rest_framework.decorators import action from rest_framework.permissions import AllowAny from rest_framework.response import Response from apps.celery_task.models import PeriodicTask from apps.celery_task.serializers.periodic_task_serializer import PeriodicTaskSerializer, CreatePeriodicTaskSerializer from packages.drf.pagination import CustomPageNumberPagination from packages.drf.renderers import CustomRenderer from packages.drf.viewsets import ModelViewSet from django_filters import FilterSet class PeriodicTaskFilter(FilterSet): class Meta: model = PeriodicTask fields = {"name": ["exact"], "creator": ["contains"]} class PeriodicTaskViewSet(ModelViewSet): permission_classes = [AllowAny] queryset = PeriodicTask.objects.all() serializer_class = PeriodicTaskSerializer pagination_class = CustomPageNumberPagination renderer_classes = (CustomRenderer,) filter_class = PeriodicTaskFilter ordering_fields = ["id"] ordering = ["-id"] def create(self, request, *args, **kwargs): serializer = CreatePeriodicTaskSerializer(data=request.data) serializer.is_valid(raise_exception=True) name = serializer.validated_data["name"] creator = "test" serializer.validated_data["name"] = name serializer.validated_data["creator"] = creator instance = serializer.save() instance.set_enabled(True) return Response(serializer.data, status=status.HTTP_201_CREATED) @action(methods=["post"], detail=False) def create_task(self, request, *args, **kwargs): """创建任务 { "name": "test", "cron": {"minute":"*/5","hour":"*","day_of_week":"*","day_of_month":"*","month_of_year":"*"}, } """ params = request.data cron_data = params.get("cron") name = params.get("name") creator = params.get("creator", "test") periodic_task = PeriodicTask.objects.create_task(name, cron_data, creator) periodic_task.set_enabled(True) return Response({"result": "创建成功"})
yaowuya/django-major-core
apps/celery_task/views/periodic_task_view.py
periodic_task_view.py
py
2,133
python
en
code
0
github-code
6
18959826347
from rest_framework.decorators import api_view, permission_classes import random import string from pprint import pprint as pp import requests from allauth.account.models import EmailAddress from rest_framework import status from rest_framework.decorators import api_view, permission_classes from rest_framework.permissions import AllowAny from rest_framework.response import Response from points.views import new_user_point from .serializers import * User = get_user_model() @api_view(['POST']) @permission_classes([AllowAny]) def kakao_login_and_get_userinfo(request): code = request.data.get('code') headers = { 'Content-type': 'application/x-www-form-urlencoded', } body = { 'grant_type': 'authorization_code', 'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140', 'redirect_uri': 'https://kickin.kr/loading/', 'code': code, } response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body) access_token = response.json().get('access_token') headers = { 'Authorization': f'Bearer {access_token}', 'Content-type': 'application/x-www-form-urlencoded;charset=utf-8', } info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers) info_res = info_request.json() nickname = info_res.get('properties').get('nickname') email = info_res.get('kakao_account').get('email') # 해당 이메일을 사용해 가입한 이력이 있는지, 확인한다. # 해당 이메일로 가입한 이력이 없다면, 새로운 유저를 생성한다. user = User.objects.filter(email=email) if not user: user = User.objects.create_user(email=email, password='Kakao_' + nickname + '977') user.login_type = 1 user.save() # 카카오 로그인의 경우 별도의 이메일 인증을 생략 EmailAddress.objects.create(user=user, email=email, verified=True, primary=True) # 해당 유저의 정보를 업데이트한다. : login_type = 1 (카카오 로그인) # user Info 생성 user_info, user_created = UserInfo.objects.get_or_create(user=user) new_user_point(user.id) # 해당 유저의 포인트를 생성한다. # 소셜 로그인 정보는, 언제든 바뀔 수 았기 때문에 굳이 저장하지 않는다. kakao_profile = info_res.get('kakao_account').get('profile').get('profile_image_url') kakao_nickname = info_res.get('properties').get('nickname') # 로그인 응답 데이터 생성 response_data = { 'kakao_profile': kakao_profile, 'kakao_nickname': kakao_nickname, 'kakao_email': email, # 로그인 처리를 위해 응답 데이터에 이메일을 포함시킨다. / 비밀번호는 패턴화 되어있다. (Kakao_ + nickname + 977) } return Response(data=response_data, status=status.HTTP_200_OK) @api_view(['POST']) @permission_classes([AllowAny]) def kakao_test(request): code = request.data.get('code') headers = { 'Content-type': 'application/x-www-form-urlencoded', } body = { 'grant_type': 'authorization_code', 'client_id': 'dcf8cc38ec4e7ec39baf6207a53ed140', 'redirect_uri': 'http://localhost:8080/loading/', 'code': code, } response = requests.post(headers=headers, url='https://kauth.kakao.com/oauth/token', data=body) pp(response.json()) access_token = response.json().get('access_token') headers = { 'Authorization': f'Bearer {access_token}', 'Content-type': 'application/x-www-form-urlencoded;charset=utf-8', } info_request = requests.get(url='https://kapi.kakao.com/v2/user/me', headers=headers) info_res = info_request.json() pp(info_res) return Response(data=info_res, status=status.HTTP_200_OK)
isaacShin-dev/kickin
accounts/social_views.py
social_views.py
py
3,846
python
ko
code
0
github-code
6
20093575148
# General import os # Tools/utils import itertools import multiprocessing from tqdm.notebook import tqdm from tqdm import tqdm as tqdm_cli from functools import reduce # for aggregate functions from itertools import chain # for aggregate functions # Data management import math import numpy as np import pandas as pd import networkx as nx import igraph as ig import leidenalg as la from community import community_louvain # Visualization import matplotlib.pyplot as plt import seaborn as sns import pygraphviz as pgv import colorcet as cc from matplotlib.colors import ListedColormap from wordcloud import WordCloud, STOPWORDS from termcolor import colored # colored text output from sklearn.preprocessing import MinMaxScaler stopwords = STOPWORDS.union({ 'regulation', 'activity', 'positive', 'negative', 'catabolic', 'process', 'protein', 'complex', 'binding', 'response', 'gene', 'genes', 'encoding', 'defining', 'GeneID', 'regulated', }) def get_tf_targ_ctx(df): tf_target_dict = {'TF': [], 'target': [], 'importance': []} tf_target_info = ( df.droplevel(axis=0, level=1).droplevel(axis=1, level=0)['TargetGenes'] .map(set) # transform each list into set .groupby('TF').agg(lambda x: reduce(lambda a, b: a.union(b), x)) # combine all targets per TF ) for tf, target_info in tf_target_info.iteritems(): tf_target_dict['TF'] += [tf for target_name, score in target_info] tf_target_dict['target'] += [target_name for target_name, score in target_info] tf_target_dict['importance'] += [score for target_name, score in target_info] return pd.DataFrame(tf_target_dict) def netgraph_community_layout(G, node_to_community, community_scale=1., node_scale=2., seed=42): """ Compute the node positions for a modular graph. """ # assert that there multiple communities in the graph; otherwise abort communities = set(node_to_community.values()) if len(communities) < 2: warnings.warn("Graph contains a single community. Unable to compute a community layout. Computing spring layout instead.") return nx.spring_layout(G, weight='importance', **kwargs) community_size = _get_community_sizes(node_to_community) community_centroids = _get_community_positions(G, node_to_community, community_scale, seed=seed) relative_node_positions = _get_node_positions(G, node_to_community, node_scale, seed=seed) # combine positions node_positions = dict() for node, community in node_to_community.items(): xy = community_centroids[node] delta = relative_node_positions[node] * community_size[community] node_positions[node] = xy + delta return node_positions def _get_community_sizes(node_to_community): """ Compute the area of the canvas reserved for each community. """ def _invert_dict(mydict): """Invert a dictionary such that values map to keys.""" inverse = dict() for key, value in mydict.items(): inverse.setdefault(value, set()).add(key) return inverse scale = (1, 1) total_nodes = len(node_to_community) max_radius = np.linalg.norm(scale) / 2 scalar = max_radius / total_nodes community_to_nodes = _invert_dict(node_to_community) community_size = {community : len(nodes) * scalar for community, nodes in community_to_nodes.items()} return community_size def _get_community_positions(G, node_to_community, community_scale, seed, simple=True): """ Compute a centroid position for each community. 
""" # create a weighted graph, in which each node corresponds to a community, # and each edge weight to the number of edges between communities between_community_edges = _find_between_community_edges(G, node_to_community) communities = set(node_to_community.values()) hypergraph = nx.DiGraph() hypergraph.add_nodes_from(communities) if not simple: for (ci, cj), edges in between_community_edges.items(): hypergraph.add_edge(ci, cj, weight=len(edges)) # find layout for communities pos_communities = nx.spring_layout(hypergraph, scale=community_scale, seed=seed) # set node positions to position of community pos = dict() for node, community in node_to_community.items(): pos[node] = pos_communities[community] return pos def _find_between_community_edges(G, node_to_community, fixed_community=None): """Convert the graph into a weighted network of communities.""" edges = dict() for (ni, nj) in G.edges(): ci = node_to_community[ni] cj = node_to_community[nj] if fixed_community is not None: if fixed_community != ci and fixed_community != cj: continue if ci != cj: try: edges[(ci, cj)] += [(ni, nj)] except KeyError: edges[(ci, cj)] = [(ni, nj)] return edges def _get_node_positions(G, node_to_community, node_scale, seed): """ Positions nodes within communities. """ communities = dict() for node, community in node_to_community.items(): try: communities[community] += [node] except KeyError: communities[community] = [node] pos = dict() for ci, nodes in communities.items(): subgraph = G.subgraph(nodes) pos_subgraph = nx.spring_layout(subgraph, weight='importance', scale=node_scale, seed=seed) pos.update(pos_subgraph) return pos def squeeze_graph(G, partition, approximate_size=4000): """ Squeeze graph by picking only top nodes (according to number of connections) in each partition. This step is needed to speed up the networkx visualization and show only the general POV on the graph. 
""" #### STEP 1 - filtering nodes # Getting the number of partitions num_partitions = len(set(partition.values())) # Getting partition parameters partition_sizes = {i: len([1 for node, k in partition.items() if k == i]) for i in range(num_partitions)} min_partition_size = min(partition_sizes.values()) # Normalizing partition size: divide each partition size by the minimal partition size normalized_partition_size = {i: (size // min_partition_size) for i, size in partition_sizes.items()} # Getting scale factor - to get approximately size of the graph close to approximate_size scale_factor = math.ceil(approximate_size / sum(normalized_partition_size.values())) squeezed_partition = {i: (size * scale_factor) for i, size in normalized_partition_size.items()} top_nodes = [] for i, num_nodes in squeezed_partition.items(): # Getting partition graph partition_i = G.subgraph([node for node, k in partition.items() if k == i]) # Finding inter-community edges intercommunity_edges = _find_between_community_edges(G, partition, i) # Calculating node importance according to number of inter-community edges node_importance = {} for (part_1, part_2), edges in intercommunity_edges.items(): for node_1, node_2 in edges: curr_node = node_1 if part_1 == i else node_2 if curr_node in node_importance: node_importance[curr_node] += 1 else: node_importance[curr_node] = 1 # Getting top nodes in the partition according to maximum number of inter-community edge (node_importance) top_nodes += list(dict(sorted(node_importance.items(), key=lambda x: x[1], reverse=True)[:squeezed_partition[i]]).keys()) filtered_partition = {node: i for node, i in partition.items() if node in top_nodes} filtered_G = G.subgraph(top_nodes) #### STEP 2 - filtering edges # Setting up the size of the squeezed graph (number of edges) keep_num_edges = 20000 edges_to_keep = \ list( dict( sorted( { (st, end): data['importance'] for st, end, data in filtered_G.edges(data=True) }.items(), key=lambda x: x[1], reverse=True)[:keep_num_edges] ).keys() ) squeezed_G = filtered_G.edge_subgraph(edges_to_keep) squeezed_partition = {node: i for node, i in filtered_partition.items() if node in squeezed_G.nodes()} return squeezed_G, squeezed_partition def get_elipsis_mask(): h, w = 600, 800 center = (int(w/2), int(h/2)) radius_x = w // 2 radius_y = h // 2 Y, X = np.ogrid[:h, :w] mask = ((X - center[0])**2/radius_x**2 + (Y - center[1])**2/radius_y**2 >= 1)*255 return mask def plot_cloud(G, partition, squeezed_pos, ax, anno_db, filter_genes=True, limit_anno_until=50, display_func=False, if_betweenness=True, k=3000): """ Plot word cloud that indicates the function(s) of each gene cluster. 
""" # Loading the gene functional annotation gene_func = load_gene_func_db(anno_db, reload=False, as_series=True) # Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...} partition_genes_ = {} for gene, i in partition.items(): if i not in partition_genes_.keys(): partition_genes_[i] = [gene] else: partition_genes_[i] += [gene] # If display gene function in the word clouds if display_func: # Whether to filter the genes on which we compute the word cloud (most important genes) if filter_genes: compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'} partition_genes = {} t = tqdm(partition_genes_.items()) for i, genes in t: t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}') top_len = min(limit_anno_until, len(genes)) top_gene_scores = dict( sorted( compute_centrality( G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric ).items(), key=lambda x: x[1], reverse=True )[:top_len] ) # Renormalizing centrality scores between 1 and 100, and rounding them to use later when # displaying wordclouds (higher score - higher "frequency" or word size) norm_top_gene_scores = dict( zip( top_gene_scores.keys(), list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100))) ) ) partition_genes[i] = norm_top_gene_scores print('Filtered genes for generating the function word cloud..') else: partition_genes = {{gene_: 1 for gene_ in gene_list} for i, gene_list in partition_genes_.items()} # Computing functional annotation for each cluster as a concatenated list of annotations # Each annotation is weighted by its duplication gene_score times (e.g. a gene has score = 2 -> # the functional annotation is duplicated and have bigger font in WordCloud) partition_funcs = { i: ' '.join( chain.from_iterable([ gene_func[gene_func.index == gene].to_list()*gene_score for gene, gene_score in gene_score_list.items() ])) for i, gene_score_list in partition_genes.items() } # Generating word counts from aggregated gene annotation texts -> obtaining main (most frequent) function tokens word_counts = {i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in partition_funcs.items()} word_counts = { i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items() } # dealing with no word case wordclouds = { i: WordCloud( max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask() ).generate_from_frequencies(freqs) for i, freqs in word_counts.items() } # Display main genes in decreasing order of importance (top `top_len` genes) else: compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'} partition_genes = {} t = tqdm(partition_genes_.items()) for i, genes in t: t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}') top_len = min(limit_anno_until, len(genes)) top_gene_scores = dict( sorted( compute_centrality( G.subgraph(genes), k=min(G.subgraph(genes).order(), k), **distance_metric ).items(), key=lambda x: x[1], reverse=True )[:top_len] ) # Renormalizing centrality scores between 1 and 100, and rounding them to use later when # displaying wordclouds (higher score - higher "frequency" or word size) norm_top_gene_scores = dict( zip( top_gene_scores.keys(), 
list(map(lambda x: int(x), scale(list(top_gene_scores.values()), 1, 100))) ) ) partition_genes[i] = norm_top_gene_scores print('Obtained top genes for generating the gene word cloud..') wordclouds = { i: WordCloud( max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask() ).generate_from_frequencies(gene_score_dict) for i, gene_score_dict in partition_genes.items() } # Plotting partition_coords = {} for gene, coords in squeezed_pos.items(): if partition[gene] not in partition_coords: partition_coords[partition[gene]] = [coords] else: partition_coords[partition[gene]] += [coords] for i, coords in partition_coords.items(): x, y = zip(*coords) min_x, max_x = min(x), max(x) min_y, max_y = min(y), max(y) ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y]) return ax def process_communities(data, pat=None, algo='leiden', filter_quantile=0.95, if_betweenness=True, limit_anno_until=50, k=5000, save_top_intercommunity_links_until=20, other_functions_until=20, save_top_new_found_cluster_links=20, seed=42): """ Process graph by finding its communities, annotate its communities, and save everything into .tsv format. """ from joblib import Parallel, delayed def highlight_TFs(word, font_size, position, orientation, font_path, random_state): TF_color = (255, 0, 0) # red if word in lambert_TF_names or word in dorothea_TF_names: return TF_color else: r, g, b, alpha = plt.get_cmap('viridis')(font_size / 120) return (int(r * 255), int(g * 255), int(b * 255)) print('\nPerforming community analysis..\n\n') # Setting pathways to files _PROJ_PATH = '/gpfs/projects/bsc08/bsc08890' _FMETA = os.path.join(_PROJ_PATH, 'data/GSE145926_RAW/metadata.tsv') _DATA_HOME = os.path.join(_PROJ_PATH, 'res/covid_19') # Loading sample meta data, reordering patients full_meta = pd.read_csv(_FMETA, sep='\t', index_col=0) # Prepare everything to save the figs and dataframe if data == 'all_data': data = 'raw_data' elif 'raw_data_' not in data: data = f'raw_data_{data}' else: pass if pat is None or pat == 'all_data': # Cell-type aggregated data data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '') figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2', f'raw_data') data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities') data_as = os.path.join(data_to, f'raw_data_communities_info.pickle') elif pat in ['C', 'M', 'S']: # Patient-type aggregated data data_folder = 'all_data' if data == 'raw_data' else data.replace('raw_data_', '') figs_as = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'figs', 'grnboost2', f'raw_data_{pat}_type') data_to = os.path.join(_DATA_HOME, 'cell_types', data_folder, 'data', 'grnboost2', f'{algo}_communities') data_as = os.path.join(data_to, f'raw_data_{pat}_type_communities_info.pickle') else: # Loading patient-specific data figs_as = os.path.join(_DATA_HOME, pat, 'figs', 'grnboost2', f'{data}') data_to = os.path.join(_DATA_HOME, pat, 'data', 'grnboost2', f'{algo}_communities') data_as = os.path.join(data_to, f'{data}_communities_info.pickle') os.makedirs(data_to, exist_ok=True) os.makedirs(os.path.dirname(figs_as), exist_ok=True) # Loading lists of TFs from Lambert 2018 and DoRothEA, in the latter case we will keep only confident regulons lambert_TF_names = pd.read_csv(os.path.join(_PROJ_PATH, 'data/TF_lists/lambert2018.txt'), header=None)[0].to_list() dorothea_TF_names = list( pd.read_csv(os.path.join(_PROJ_PATH, 
'data/TF_lists/dorothea_regulons.tsv'), sep='\t') \ .loc[lambda x: x['confidence'].isin(['A', 'B', 'C'])]['tf'].unique() ) # Loading the graph G = get_nx_graph(data=data, data_type='all', pat=pat, get_filtered=filter_quantile) print(f"Loaded the graph: {colored('pat', 'green')}='{colored(pat, 'red')}', " f"{colored('data', 'green')}='{colored(data, 'red')}', " f"{colored('data_type', 'green')}='{colored('all', 'red')}'\n") ###### FINDING COMMUNITIES IN THE GRAPH ####### print('Finding communities in the graph..') if algo == 'louvain': partition = community_louvain.best_partition(G.to_undirected(), weight='importance', random_state=seed) elif algo == 'leiden': G_igraph = ig.Graph.from_networkx(G.to_undirected()) la_partition = la.find_partition(G_igraph, la.ModularityVertexPartition, weights='importance', seed=seed) partition = {G_igraph.vs[node]['_nx_name']: i for i, cluster_nodes in enumerate(la_partition) for node in cluster_nodes} else: raise NotImplementedError num_partitions = len(set(partition.values())) print(f'Number of partitions using {algo} algorithm: {colored(num_partitions, "cyan")}\n') ###### FINDING HIGH-CENTRALITY GENES IN THE WHOLE GRAPH print('Finding high-centrality genes in the whole graph..') num_workers = max(multiprocessing.cpu_count() // 2, 1) whole_G_central_genes = dict( sorted(betweenness_centrality_parallel(G, processes=num_workers).items(), key=lambda x: x[1], reverse=True)[:limit_anno_until] ) print(f'Computed the {"betweenness" if if_betweenness else "closeness"} centrality for all genes in the graph\n') ###### FINDING HIGH-CENTRALITY GENES AND CORRESPONDING FUNCTIONS IN EACH COMMUNITY USING GO ANNOTATION ###### print('Finding high-centrality genes/functions in each cluster..') # Loading the gene functional annotation anno_db_tags = ['GO', 'KEGG', 'immunological', 'hallmark'] gene_func_dbs = {tag: load_gene_func_db(tag, as_series=True) for tag in anno_db_tags} # Reversing partition dict -> {group_1: [gene_1, gene_2, ...], group_2: [gene_3, gene_4, ...], ...} partition_genes_ = {} for gene, i in partition.items(): if i not in partition_genes_.keys(): partition_genes_[i] = [gene] else: partition_genes_[i] += [gene] # Whether to filter the genes on which we compute the word cloud (most important genes) compute_centrality = nx.betweenness_centrality if if_betweenness else nx.closeness_centrality distance_metric = {'weight': 'distance'} if if_betweenness else {'distance': 'distance'} all_partition_genes = {} norm_partition_genes = {} t = tqdm_cli(partition_genes_.items(), ascii=True) for i, genes in t: t.set_description(f'Processing cluster {i}, size={G.subgraph(genes).order()}') gene_scores = dict( sorted( compute_centrality( G.subgraph(genes), k=min(G.subgraph(genes).order(), k), normalized=True, **distance_metric ).items(), key=lambda x: x[1], reverse=True ) ) all_partition_genes[i] = gene_scores central_gene_scores = {gene: gene_scores[gene] for k, gene in enumerate(gene_scores.keys()) if k < limit_anno_until} # Renormalizing centrality scores between 1 and 100, and rounding them to use later when # displaying wordclouds (higher score - higher "frequency" or word size) norm_partition_genes[i] = dict( zip( central_gene_scores.keys(), list(map(lambda x: int(x), scale(list(central_gene_scores.values()), 1, 100))) ) ) print('Computed centrality scores for each gene in each community\n') print('Finding functional annotations for each cluster..') # Computing functional annotation for each cluster as a concatenated list of annotations # Each annotation is weighted by 
its duplication gene_score times (e.g. a gene has score = 2 -> # the functional annotation is duplicated and have bigger font in WordCloud) # We also do it for different functional annotations like GO, KEGG, Hallmark, etc.. partition_funcs = { tag: { i: ' '.join( chain.from_iterable([ gene_func[gene_func.index == gene].to_list()*gene_score for gene, gene_score in gene_score_list.items() ])) for i, gene_score_list in norm_partition_genes.items() } for tag, gene_func in gene_func_dbs.items() } print('Computed functional annotations for each cluster\n') ###### PLOTTING GENE AND FUNC COMMUNITY CLOUDS ###### print('Plotting clusters..') # Getting positions of squeezed graph - we do not plot every gene on the figure squeezed_G, squeezed_partition = squeeze_graph(G, partition) print('Computed a squeezed graph representation..') squeezed_pos = netgraph_community_layout(squeezed_G, squeezed_partition, seed=seed) # nx.nx_agraph.pygraphviz_layout(G.to_undirected(), prog="sfdp") # nx.nx.spring_layout(G, seed=seed, k=0.2, iterations=20) partition_coords = {} for gene, coords in squeezed_pos.items(): if partition[gene] not in partition_coords: partition_coords[partition[gene]] = [coords] else: partition_coords[partition[gene]] += [coords] print('Computed node positions of the squeezed graph representation..') cmap = ListedColormap(sns.color_palette(cc.glasbey_bw, n_colors=num_partitions).as_hex()) for plot_type in ['genes'] + list(map(lambda x: f"func_{x}", anno_db_tags)): if plot_type.startswith('func'): # Getting current functional annotation curr_partition_funcs = partition_funcs[plot_type[plot_type.find('_') + 1:]] f, ax = plt.subplots(figsize=(20, 35)) if plot_type == 'genes': wordclouds = { i: WordCloud( max_words=30, min_font_size=15, background_color='white', mask=get_elipsis_mask() ).generate_from_frequencies(gene_score_dict).recolor(color_func=highlight_TFs) for i, gene_score_dict in norm_partition_genes.items() } else: word_counts = { i: WordCloud(max_words=30, min_font_size=15, stopwords=stopwords).process_text(text) for i, text in curr_partition_funcs.items() } word_counts = { i: (freqs if freqs else {'no found function': 1}) for i, freqs in word_counts.items() } # dealing with no word case wordclouds = { i: WordCloud( max_words=30, min_font_size=15, stopwords=stopwords, background_color='white', mask=get_elipsis_mask() ).generate_from_frequencies(freqs) for i, freqs in word_counts.items() } # Plotting clouds for i, coords in partition_coords.items(): x, y = zip(*coords) min_x, max_x = min(x), max(x) min_y, max_y = min(y), max(y) ax.imshow(wordclouds[i], interpolation='bilinear', extent=[min_x, max_x, min_y, max_y]) print(f'Finished plotting {plot_type} word cloud..') nx.draw(squeezed_G, squeezed_pos, ax=ax, arrowstyle="->", arrowsize=20, connectionstyle=f'arc3, rad = 0.25', edge_color='gray', width=0.4, node_color='k', node_size=50, alpha=0.02) nx.draw_networkx_nodes(squeezed_G, squeezed_pos, ax=ax, node_size=100, nodelist=list(squeezed_partition.keys()), node_color=list(squeezed_partition.values()), cmap=cmap, alpha=0.005) print(f'Finished plotting {plot_type} nodes..') ax.set_title(f'Found communities ({pat}, "all", {data}), ' f'annotation - {plot_type}', fontsize=30) plt.axis('off') plt.savefig(f'{figs_as}_{plot_type}.png', bbox_inches='tight', dpi=400) print('Finished plotting..\n') ###### SAVING DATAFRAME CONTAINING INFORMATION ABOUT EACH COMMUNITY ###### def compute_community_info(i): """ Parallel saving of the dataframe. 
""" # Getting information for each community genes = list(all_partition_genes[i].keys()) community_subgraph = G.subgraph(genes) communities_i = pd.Series(dtype='object') # Setting tqdm logs # t.set_description(f'Saving info about {i} cluster, size={community_subgraph.order()}') # Getting information about cluster genes central_genes_and_scores = { gene: all_partition_genes[i][gene] for k, gene in enumerate(genes) if k < limit_anno_until } non_lambert_TFs = [ f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in lambert_TF_names ] non_dorothea_TFs = [ f'{gene} (rank={k})' for k, gene in enumerate(central_genes_and_scores.keys(), start=1) if gene not in dorothea_TF_names ] # Filling dataframe with the information communities_i['num_nodes'] = community_subgraph.number_of_nodes() communities_i['num_edges'] = community_subgraph.number_of_edges() communities_i['all_sorted_genes'] = '; '.join( f'{gene} (score={score})' for gene, score in all_partition_genes[i].items() ) communities_i['sorted_central_genes_scores'] = '; '.join( f'{gene} (score={score:.2f})' for gene, score in central_genes_and_scores.items() ) communities_i['non_lambert_2018_TF_central_genes'] = '; '.join(non_lambert_TFs) communities_i['non_dorothea_TF_central_genes'] = '; '.join(non_dorothea_TFs) communities_i['whole_G_central_genes_scores'] = '; '.join( f'{gene} (score={score:.2f})' for gene, score in whole_G_central_genes.items() ) # Filling information about newly found gene-gene links (based on absence in KEGG and Hallmark) top_cluster_links = set() iter_i = 0 for st, end, edge_info in sorted(community_subgraph.edges(data=True), key=lambda t: t[2]['importance'], reverse=True): # If the current (reverse directed) link was not encountered previously.. if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in top_cluster_links]: top_cluster_links.add((st, end, edge_info['importance'])) iter_i += 1 if iter_i == save_top_new_found_cluster_links: break for anno_tag in ['KEGG', 'hallmark']: curr_db = load_gene_func_db(anno_tag) tmp_list = [] # if `st` gene and `end` gene have non-overlapping annotations.. for st, end, imp in top_cluster_links: st_anno_IDs = set(curr_db[curr_db.index == st]['ID']) end_anno_IDs = set(curr_db[curr_db.index == end]['ID']) if len(st_anno_IDs.intersection(end_anno_IDs)) == 0 and \ (len(st_anno_IDs) != 0 or len(end_anno_IDs) != 0): tmp_list.append(f"{st} ({' & '.join(st_anno_IDs)}) <-> {end} ({' & '.join(end_anno_IDs)})") communities_i[f'new_gene_gene_links_{anno_tag}'] = '; '.join(tmp_list) # Filling information about cluster functions for tag, gene_func in gene_func_dbs.items(): curr_partition_funcs = partition_funcs[tag] # Filling main functions - non duplicates at the top main_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order func for gene in central_genes_and_scores.keys() for func in gene_func[gene_func.index == gene].to_list() ])) gene_with_main_functions = [ ','.join( gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list() ) for func in main_functions ] main_functions = [ f'>>> {func} <<<: {gene}' for gene, func in zip(gene_with_main_functions, main_functions) ] communities_i[f'main_functions_{tag}'] = '; '.join(main_functions) # saving.. 
# Saving functions corresponding to each gene central_functions_per_gene = [ f">>> {gene} <<<: {' & '.join(gene_func[gene_func.index == gene].to_list())}" for gene in central_genes_and_scores.keys() ] communities_i[f'sorted_central_functions_{tag}'] = '; '.join(central_functions_per_gene) # saving.. # Saving most frequent function words freq_words = WordCloud( max_words=30, min_font_size=15, stopwords=stopwords ).process_text(curr_partition_funcs[i]) freq_words = dict( sorted(freq_words.items(), key=lambda x: x[1], reverse=True) ) if freq_words else {'no found function': 1} # dealing with no word case communities_i[f'most_frequent_function_words_{tag}'] = '; '.join(freq_words.keys()) # saving # Saving other functions present in this cluster other_functions = list(dict.fromkeys([ # dropping duplicates, but preserving order func for gene in genes if gene not in central_genes_and_scores.keys() for func in gene_func[gene_func.index == gene].to_list() if func not in main_functions ]))[:other_functions_until] genes_with_other_functions = [ ','.join( gene_func[gene_func == func].loc[lambda x: x.index.isin(genes)].index.to_list() ) for func in other_functions ] other_functions = [ f'>>> {func} <<<: {gene}' for gene, func in zip(genes_with_other_functions, other_functions) ] communities_i[f'other_functions_{tag}'] = '; '.join(other_functions) # saving # Filling information about top inter-community links # t_sub = tqdm(range(num_partitions), ascii=True, leave=False) for k in range(num_partitions): # t_sub: # t_sub.set_description(f'Extracting top inter-community links with {k}') if i != k: genes_in_k = list(all_partition_genes[k].keys()) # Getting the subgraph that contains central genes in community_i and all genes in comunity_k G_central_i_k = G.subgraph(list(central_genes_and_scores.keys()) + genes_in_k) # Getting the subgraph that contains all genes from community_i and community_k G_i_k = G.subgraph(genes + genes_in_k) # Creating two helper sets that allow us to keep only unique links links_central_i_k = set() links_i_k = set() iter_i = 0 # Getting out top links from the second subgraph for st, end, edge_info in sorted(G_central_i_k.edges(data=True), key=lambda t: t[2]['importance'], reverse=True): # If the current (reverse directed) link was not encountered previously.. if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_central_i_k] and \ ((st in genes and end not in genes) or (end in genes and st in genes)): links_central_i_k.add((st, end, edge_info['importance'])) iter_i += 1 if iter_i == save_top_intercommunity_links_until: break iter_i = 0 # Getting out top links from the second subgraph for st, end, edge_info in sorted(G_i_k.edges(data=True), key=lambda t: t[2]['importance'], reverse=True): # If the current (reverse directed) link was not encountered previously.. 
if (end, st) not in [(uniq_st, uniq_end) for uniq_st, uniq_end, _ in links_i_k] and \ ((st in genes and end not in genes) or (end in genes and st in genes)): links_i_k.add((st, end, edge_info['importance'])) iter_i += 1 if iter_i == save_top_intercommunity_links_until: break # Adding top links to the dataframe communities_i[f'top_links_scores_central_genes<->community_{k}'] = \ '; '.join(f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_central_i_k) communities_i[f'top_links_scores_with_community_{k}'] = \ '; '.join([f'{st} <-> {end} (score={score:.2f})' for st, end, score in links_i_k]) return communities_i print('Saving info dataframe..') t = tqdm_cli(range(num_partitions), ascii=True) # Getting dataframe result = Parallel(n_jobs=num_workers)(delayed(compute_community_info)(i) for i in t) communities_df = pd.concat(result, axis=1).T.reindex( columns=[ 'num_nodes', 'num_edges', 'main_functions_GO', 'main_functions_KEGG', 'main_functions_immunological', 'main_functions_hallmark', 'non_lambert_2018_TF_central_genes', 'non_dorothea_TF_central_genes', 'new_gene_gene_links_KEGG', 'new_gene_gene_links_hallmark', 'whole_G_central_genes_scores', 'other_functions_GO', 'other_functions_KEGG', 'other_functions_immunological', 'other_functions_hallmark', 'sorted_central_genes_scores', 'sorted_central_functions_GO', 'sorted_central_functions_KEGG', 'sorted_central_functions_immunological', 'sorted_central_functions_hallmark', 'most_frequent_function_words_GO', 'most_frequent_function_words_KEGG', 'most_frequent_function_words_immunological', 'most_frequent_function_words_hallmark', 'all_sorted_genes'] + [f'top_links_scores_central_genes<->community_{i}' for i in range(num_partitions)] + [f'top_links_scores_with_community_{i}' for i in range(num_partitions) ] ) # Saving dataframe communities_df.to_pickle(data_as) print(f"Saved the data to {data_as}!\n") def run_enrichr(data, is_communities=False, is_positive_markers=True, group_types = 'all', on_targets=False, choose_fixed_tf=None, data_type='all', top_n=50, algo='leiden', enrichr_library='MSigDB_Hallmark_2020'): """ Run enrichment analysis with Enrichr. 
""" import json import requests import sys import io out_folder = 'community_ana' if is_communities else 'cohort_ana' if is_communities == True: print('Running EnrichR on communities..') algo = 'leiden' _DATA_HOME = '/gpfs/projects/bsc08/bsc08890/res/covid_19' if data_type == 'all': community_data = pd.read_pickle(os.path.join( _DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities', f'raw_data_communities_info.pickle' )) else: community_data = pd.read_pickle(os.path.join( _DATA_HOME, 'cell_types', data, 'data', 'grnboost2', f'{algo}_communities', f'raw_data_{data_type}_type_communities_info.pickle' )) df = pd.concat([ pd.DataFrame({ 'cluster': f'cluster_{i}', 'gene': [el[: el.find(' ')] for el in vals.split('; ')][:top_n] }) for i, vals in community_data['all_sorted_genes'].iteritems() ], axis=0).reset_index(drop=True) else: if on_targets: print('Running EnrichR on targets between 3 group types..') types = ['C', 'M', 'S'] df = pd.concat([ pd.read_csv( f'/gpfs/home/bsc08/bsc08890/tmp/cohort_ana/tmp_enrichr_{data}_{t}_{choose_fixed_tf}_target_list.tsv', header=None, names=['gene'] ).assign(cluster=t) for t in types ], axis=0) else: if group_types == 'all': print('Running EnrichR on TFs between 3 group types..') df = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{data}.tsv', sep='\t') else: print('Running EnrichR on 2 group types..') if group_types == 'M_S': group_types = 'S_M' if group_types == 'C_M': group_types = 'M_C' if group_types == 'C_S': group_types = 'S_C' df_1 = pd.read_csv(f'/gpfs/home/bsc08/bsc08890/tmp/tf_markers_df_{group_types}_{data}.tsv', sep='\t') df_1['gene'] = df_1.index df_2 = df_1.copy() df_2['avg_log2FC'] = - df_2['avg_log2FC'] df_1['cluster'], df_2['cluster'] = group_types.split('_') df = pd.concat([df_1, df_2], axis=0) if is_positive_markers: df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] > 1)] else: df = df[(df['p_val_adj'] < 0.05) & (df['avg_log2FC'] < -1)] cluster_dfs = {} for cl in df['cluster'].unique(): print(f'Processing {cl}..') ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList' genes_str = '\n'.join(df[df['cluster'] == cl]['gene']) description = f"{data}_{data_type}_{cl}" if is_communities == True: filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv' elif on_targets: filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{choose_fixed_tf}_target_{cl}.tsv' elif group_types == 'all': filename = f'tmp/{out_folder}/tmp_enrichr_{data}_{data_type}_{cl}.tsv' else: filename = f'tmp/{out_folder}/tmp_enrichr_{data}_2_groups_{cl}.tsv' payload = { 'list': (None, genes_str), 'description': (None, description) } response = requests.post(ENRICHR_URL, files=payload) if not response.ok: raise Exception('Error analyzing gene list') job_id = json.loads(response.text) ################################################################################ # Get enrichment results # ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/export' query_string = '?userListId=%s&filename=%s&backgroundType=%s' user_list_id = str(job_id['userListId']) gene_set_library = str(enrichr_library) url = ENRICHR_URL + query_string % (user_list_id, filename, gene_set_library) response = requests.get(url, stream=True) print(' Enrichr API : Downloading file of enrichment results: Job Id:', job_id) with open(filename, 'wb') as f: for chunk in response.iter_content(chunk_size=1024): if chunk: f.write(chunk) print(f' Saved to {filename}') cluster_dfs[cl] = pd.read_csv(filename, sep='\t') return cluster_dfs def betweenness_centrality_parallel(G, 
processes=None): """Parallel betweenness centrality function""" from multiprocessing import Pool def chunks(l, n): """Divide a list of nodes `l` in `n` chunks""" l_c = iter(l) while 1: x = tuple(itertools.islice(l_c, n)) if not x: return yield x p = Pool(processes=processes) node_divisor = len(p._pool) * 4 node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor))) num_chunks = len(node_chunks) bt_sc = p.starmap( nx.betweenness_centrality_subset, zip( [G] * num_chunks, node_chunks, [list(G)] * num_chunks, [True] * num_chunks, ['distance'] * num_chunks ), ) # Reduce the partial solutions bt_c = bt_sc[0] for bt in bt_sc[1:]: for n in bt: bt_c[n] += bt[n] return bt_c
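# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the code above calls a
# `scale(values, 1, 100)` helper to turn centrality scores into integer
# word-cloud "frequencies", but that helper is not shown in this excerpt.
# A minimal min-max rescaling that matches how the result is used (values
# mapped into [1, 100] and then truncated to int) could look like this.
def scale_sketch(values, new_min=1, new_max=100):
    """Linearly rescale `values` into [new_min, new_max]."""
    lo, hi = min(values), max(values)
    if hi == lo:  # all scores equal: put everything at the top of the range
        return [float(new_max)] * len(values)
    return [new_min + (v - lo) * (new_max - new_min) / (hi - lo) for v in values]

# Example: three centrality scores -> integer font weights for WordCloud
# print([int(x) for x in scale_sketch([0.02, 0.5, 0.11])])  # [1, 100, 19]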
# Source: masyahook/Single-cell-gene-regulatory-networks | scGRN/func.py | Python | 43,101 bytes (github-code)
#!/usr/bin/python3 from time import sleep from datetime import date, datetime from pynput.keyboard import Key, Controller from logging.handlers import RotatingFileHandler import sys, signal, argparse, logging, platform, subprocess # ----------------------------------Configuration-------------------------------- VOLUME = "0.3" BREAK_NUM = 1 WORK_DURATION = 900 BREAK_DURATION = 120 MAC = False LINUX = False WINDOWS = False LINUX_PATH = "" MAC_PATH = "/Users/mutnawaz/Desktop/Muteeb/Code/timer/" WINDOWS_PATH = "C:\\Users\\Muteeb\\Desktop\\RV Major Project\\Personal\\timer\\" # ---------------------------------end of Configuration--------------------------- log = None def __init_logger(): global log if log is not None: log.debug("logger already initialized.") return None try: "log format <data/time:level:filename:line:function:message>" log_formatter = logging.Formatter("%(levelname)5.5s %(filename)5s#%(lineno)3s %(message)s") "Refer the log file path" PATH = get_path() log_file = PATH + "timer.log" "Max size of the log file is 2MB, it rotate if size exceeds" handler = RotatingFileHandler( log_file, mode="a", maxBytes=(2 * 1024 * 1024), backupCount=4, encoding=None, delay=0, ) "appy the log format and level" handler.setFormatter(log_formatter) handler.setLevel(logging.DEBUG) log = logging.getLogger("timer.log") log.setLevel(logging.DEBUG) "apply the settings to the log" log.addHandler(handler) log.debug("Start logging the times") return handler except Exception as e: log.error("Failed to create logger: %s", str(e)) def exit_handler(sig, frame): print("\nGood bye. Have a nice day!\n") greet() sys.exit(0) def greet(): try: print(subprocess.check_output("motivate", shell=True, stderr=subprocess.DEVNULL).decode()) except: print("\n******************************************************") print("* *") print("* *") print("* You can do it! Sending lots of energy to you :) *") print("* *") print("* *") print("******************************************************") def get_time(): now = datetime.now() time = now.strftime("%H:%M:%S") return time def play_sound(sound_file): if MAC: subprocess.check_output("afplay --volume " + VOLUME + " {}".format(sound_file), shell=True) elif LINUX: subprocess.check_output("aplay -q {}&".format(sound_file), shell=True) else: winsound.PlaySound(sound_file, winsound.SND_ASYNC) def get_path(): if MAC: return MAC_PATH elif LINUX: return LINUX_PATH else: return WINDOWS_PATH def display_sleep(): if MAC: # subprocess.check_output("pmset displaysleepnow", shell=True) # Put system to sleep. subprocess.check_output("open -a ScreenSaverEngine", shell=True) def wakeup(): if MAC: # subprocess.check_output("pmset relative wake 1", shell=True) # Wakeup the system. # log.debug("Waking up.") keyboard = Controller() key = Key.esc keyboard.press(key) keyboard.release(key) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-s", "--slient", action="store_true", help="Run in silent mode.") args = vars(parser.parse_args()) if platform.system() == "linux" or platform.system() == "linux2": LINUX = True elif platform.system() == "darwin" or platform.system() == "Darwin": MAC = True elif platform.system() == "win32" or platform.system() == "Windows": WINDOWS = True if not args["slient"]: try: import winsound except Exception as e: print("Sound is not supported in windows. 
Reason: {0}".format(e)) args["slient"] = True __init_logger() PATH = get_path() signal.signal(signal.SIGINT, exit_handler) greet() if args["slient"]: print("Running in slient mode...") log.info("Today's date: {0}".format(date.today())) if not args["slient"]: play_sound(PATH + "start_timer.wav") while True: log.info("Work number {0}, start time {1}".format(BREAK_NUM, get_time())) sleep(WORK_DURATION) log.info("Work number {0}, end time {1}".format(BREAK_NUM, get_time())) if not args["slient"]: play_sound(PATH + "take_break.wav") display_sleep() log.info("Break number {0}, start time {1}".format(BREAK_NUM, get_time())) sleep(BREAK_DURATION) log.info("Break number {0}, end time {1}".format(BREAK_NUM, get_time())) if not args["slient"]: play_sound(PATH + "two_mins_up.wav") wakeup() BREAK_NUM += 1
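# --------------------------------------------------------------------------
# Note on the platform checks above (sketch, not part of the original file):
# platform.system() returns "Linux", "Darwin" or "Windows", while values such
# as "linux", "linux2" and "win32" come from sys.platform. As written, the
# LINUX flag above is therefore never set on Linux. A small normalisation
# like the one below would accept either convention.
import platform
import sys

def detect_os():
    name = platform.system().lower()   # "linux", "darwin", "windows"
    plat = sys.platform                # "linux", "darwin", "win32", ...
    is_linux = name == "linux" or plat.startswith("linux")
    is_mac = name == "darwin" or plat == "darwin"
    is_windows = name == "windows" or plat == "win32"
    return is_linux, is_mac, is_windows

# Example:
# LINUX, MAC, WINDOWS = detect_os()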
# Source: muteebakram/Timer | main.py | Python | 5,198 bytes (github-code)
import sys

sys.path.insert(0, '../../class')

import os
import time
import multiprocessing
import argparse
from functools import partial

import numpy as np
from scipy.io import loadmat

import nnet
import cubelattice as cl

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Verification Settings')
    parser.add_argument('--property', type=str, default='1')
    parser.add_argument('--n1', type=int, default=2)
    parser.add_argument('--n2', type=int, default=3)
    parser.add_argument('--compute_unsafety', action='store_true')
    args = parser.parse_args()

    i = args.n1
    j = args.n2

    def verification(afv):
        safe = True
        return safe

    print("neural_network_" + str(i) + str(j))
    nn_path = "nets/neural_network_information_" + str(i) + str(j) + ".mat"
    filemat = loadmat(nn_path)

    if not os.path.isdir('logs'):
        os.mkdir('logs')

    W = filemat['W'][0]
    b = filemat['b'][0]
    lb = [-0.1, -0.1, -0.1]
    ub = [0.1, 0.1, 0.1]

    nnet0 = nnet.nnetwork(W, b)
    nnet0.verification = verification
    initial_input = cl.cubelattice(lb, ub).to_lattice()

    cpus = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(cpus)

    nnet0.start_time = time.time()
    nnet0.filename = "logs/output_info" + str(i) + str(j) + '.txt'

    outputSets = []
    nputSets0 = nnet0.singleLayerOutput(initial_input, 0)
    pool.map(partial(nnet0.layerOutput, m=1), nputSets0)
    pool.close()

    elapsed_time = time.time() - nnet0.start_time
    print('time elapsed: %f seconds \n' % elapsed_time)
    print('result: safe\n')

    filex = open(nnet0.filename, 'w')
    filex.write('time elapsed: %f seconds \n' % elapsed_time)
    filex.write('result: safe\n')
    filex.close()
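# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the script above fans out
# nnet0.layerOutput over the first-layer output sets with
# pool.map(partial(nnet0.layerOutput, m=1), nputSets0). The toy example below
# shows the same functools.partial + multiprocessing.Pool.map pattern with a
# stand-in function, since the real nnet/cubelattice classes are not shown.
from functools import partial
from multiprocessing import Pool

def propagate(input_set, m):
    # stand-in for layerOutput: pretend to push a set through layer m
    return [x * m for x in input_set]

if __name__ == "__main__":
    input_sets = [[1, 2], [3, 4], [5, 6]]
    with Pool(2) as pool:
        results = pool.map(partial(propagate, m=10), input_sets)
    print(results)  # [[10, 20], [30, 40], [50, 60]]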
# Source: Shaddadi/veritex | examples/Microbenchmarks/main.py | Python | 1,739 bytes (github-code)
#compare parameter between abc-smc import seaborn as sns import pandas as pd import numpy as np import matplotlib.pyplot as plt import sys from scipy import stats from matplotlib.colors import LogNorm, Normalize from scipy.signal import argrelextrema filename=["ACDC_X2","ACDC_Y2","ACDC_Z2"]#,"ACDC_all"] #filename=['ACDC_X2'] filename=['ACDC_X2','ACDC_X21ind'] n=['final'] #n=['1','2','3','4','5','6','7','8','9','10','11','12','final']#'13','14','15','final'] #n=['1','2','3','4','5','6','7','8','9','10','11','12','13','final']#,'12','13','14','final']#,'15']#,'final'] path='C:/Users/Administrator/Desktop/Modeling/AC-DC/' path='/users/ibarbier/AC-DC/' sys.path.insert(0, path + filename[0]) import model_equation as meq parlist=meq.parlist namelist=[] for i,par in enumerate(parlist): namelist.append(parlist[i]['name']) par0 = { 'K_ARAX':-3.5,#0.01, 'n_ARAX':2, 'K_XY':-2.5, 'n_XY':2, 'K_XZ':-1.55,#-1.25 'n_XZ':2, 'beta_X':1, 'alpha_X':0, 'delta_X':1, 'K_ARAY':-3.5, 'n_ARAY':2, 'K_YZ':-3.5, 'n_YZ':2, 'beta_Y':1, 'alpha_Y':0, 'delta_Y':1, 'K_ZX':-2.5, 'n_ZX':2, 'beta_Z':1, 'alpha_Z':0, 'delta_Z':1, 'beta/alpha_X':2, 'beta/alpha_Y':2, 'beta/alpha_Z':2 } def pars_to_dict(pars,parlist): ### This function is not necessary, but it makes the code a bit easier to read, ### it transforms an array of pars e.g. p[0],p[1],p[2] into a ### named dictionary e.g. p['k0'],p['B'],p['n'],p['x0'] ### so it is easier to follow the parameters in the code dict_pars = {} for ipar,par in enumerate(parlist): dict_pars[par['name']] = pars[ipar] return dict_pars def load(number= n,filename=filename,parlist=parlist): namelist=[] for i,par in enumerate(parlist): namelist.append(parlist[i]['name']) number=str(number) filepath = path+filename+'/smc/pars_' + number + '.out' dist_path = path+filename+'/smc/distances_' + number + '.out' raw_output= np.loadtxt(filepath) dist_output= np.loadtxt(dist_path) df = pd.DataFrame(raw_output, columns = namelist) df['dist']=dist_output df=df.sort_values('dist',ascending=False) distlist= sorted(df['dist']) p=[] for dist in distlist: p_0=df[df['dist']==dist] p0=[] for n in namelist: p0.append(p_0[n].tolist()[0]) p0=pars_to_dict(p0,parlist) p.append(p0) return p, df def get_stats(filename,namelist): stats_df = pd.DataFrame( columns = ['par','file','mean','sd','mode']) parl = np.append(namelist,'dist') # for fi,fnm in enumerate(filename): fnm=filename[0] p,df= load(n[0],fnm,parlist) mean=np.mean(df).tolist() sd=np.std(df).tolist() mode=stats.mode(df)[0][0] new_row={'par':parl,'file':[fnm]*len(parl),'mean':mean,'sd':sd,'mode':mode} df2=pd.DataFrame(new_row) stats_df =stats_df.append(df2) return stats_df def bar_plot(filename,namelist, t="mean"): stats_df=get_stats(filename,namelist) # set width of bars barWidth = 0.20 # Set position of bar on X axis r1 = np.arange(len(parl)) #mean if t=="mean": for i,nm in enumerate(filename): v=stats_df[stats_df['method']==nm] plt.bar((r1+barWidth*i),v['mean'],yerr=v['sd'], capsize=2,width=barWidth, label=nm) plt.xlabel('par', fontweight='bold') plt.xticks([r + barWidth for r in range(len(parl))], parl) plt.legend() plt.show() #mode if t == "mode": for i,nm in enumerate(filename): v=stats_df[stats_df['method']==nm] plt.bar((r1+barWidth*i),v['mode'],width=barWidth, label=nm) plt.xlabel('par', fontweight='bold') plt.xticks([r + barWidth for r in range(len(parl))], parl) plt.legend() plt.show() def plot_compare(n,filename,namelist): parl = np.append(namelist,'dist') index=1 size=round(np.sqrt(len(parl))) for i,name in enumerate(parl): plt.subplot(size,size,index) 
plt.tight_layout() for fi,fnm in enumerate(filename): p,df= load(n,fnm,namelist1) sns.kdeplot(df[name],bw_adjust=.8,label=fnm) #plt.ylim(0,1) if i < (len(parl)-2): plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit'])) index=index+1 if index==5: plt.legend(bbox_to_anchor=(1.05, 1)) #sns.kdeplot(df['K_XZ']) plt.savefig(str(filename)+str(n)+"_compareplot.pdf", bbox_inches='tight') plt.show() #plot_compare(n[0],filename,namelist) def plot_alltime(filename,namelist): parl = np.append(namelist,'dist') index=1 for i,name in enumerate(parl): plt.subplot(4,4,index) plt.tight_layout() for ni,nmbr in enumerate(n): p,df= load(nmbr,filename[0],parlist) sns.kdeplot(df[name],bw_adjust=.8,label=nmbr) #plt.ylim(0,1) if i < (len(parl)-2): plt.xlim((parlist[i]['lower_limit'],parlist[i]['upper_limit'])) index=index+1 #if index==5: plt.legend(bbox_to_anchor=(1.05, 1)) plt.show() #plot_alltime(['ACDC_X2'],namelist) def plotdistpar(filename,namelist): index=1 for ni,nb in enumerate(n): p,df= load(nb,filename[0],parlist) for i,name in enumerate(namelist): plt.subplot(len(n),len(namelist),index) # plt.tight_layout() plt.scatter(df['dist'],df[name],s=1) mean=np.mean(df[name]).tolist() mode=stats.mode(df[name])[0][0] plt.plot([0,40],[mean,mean],'r',label="mean") plt.plot([0,40],[mode,mode],'g',label="meode") plt.ylim((parlist[i]['lower_limit'],parlist[i]['upper_limit'])) plt.ylabel(name) index=index+1 plt.legend(bbox_to_anchor=(1.05, 1)) plt.show() ''' ARA=np.logspace(-4.5,-2.,10,base=10) p,df= load(n[0],filename[0],parlist) stdf=get_stats(filename,namelist) pmean=pars_to_dict(stdf['mean']) pmode=pars_to_dict(stdf['mode']) for i,p in enumerate([p[0],pmean,pmode,p[999]]): X,Y,Z=meq.model(ARA,p) df_X=pd.DataFrame(X,columns=ARA) df_Y=pd.DataFrame(Y,columns=ARA) df_Z=pd.DataFrame(Z,columns=ARA) plt.subplot(4,3,(1+3*i)) sns.heatmap(df_X, cmap="Reds") plt.subplot(4,3,(2+3*i)) sns.heatmap(df_Y, cmap ='Blues') plt.subplot(4,3,(3+3*i)) sns.heatmap(df_Z, cmap ='Greens') plt.show() X,Y,Z=meq.model(ARA,pmode) plt.plot(X[:,0],label="DCoff") plt.plot(X[:,3],label="AC1") plt.plot(X[:,6],label="AC2") plt.plot(X[:,9],label="DCon") plt.plot([200,200],[0,1000],'--') plt.legend(bbox_to_anchor=(1.05, 1)) plt.tight_layout() plt.show() ''' #####1indvs2ind def plotdesnity1vs2(): p2,df2= load('final','ACDC_X2',parlist) parlist1=parlist.copy() del parlist1[7:9] p1,df1= load('final','ACDC_X21ind',parlist1) namelist=[] for i,par in enumerate(parlist1): namelist.append(par['name']) parl = np.append(namelist,'dist') index=1 for i,name in enumerate(parl): plt.subplot(4,4,index) plt.tight_layout() sns.kdeplot(df1[name],bw_adjust=.8,label='X_1ind') sns.kdeplot(df2[name],bw_adjust=.8,label='X_2ind') #plt.ylim(0,1) if i < (len(parl)-2): plt.xlim((parlist1[i]['lower_limit'],parlist1[i]['upper_limit'])) index=index+1 if index==5: plt.legend(bbox_to_anchor=(1.05, 1)) #sns.kdeplot(df['K_XZ']) plt.savefig("1vs2ind"+str(n[0])+"_compareplot.pdf", bbox_inches='tight') #plt.show() plotdesnity1vs2() def ind1vs2indmeanandmode(): p2,df2= load('final','ACDC_X',parlist) df2=df2.drop(columns=['K_ARAY', 'n_ARAY']) mean_df2=np.mean(df2) sd_df2=np.std(df2) mode_df2=stats.mode(df2)[0][0] parlist1=parlist.copy() del parlist1[7:9] p1,df1= load('12','ACDC_1ind',parlist1) mean_df1=np.mean(df1) sd_df1=np.std(df1) mode_df1=stats.mode(df1)[0][0] namelist=[] for i,par in enumerate(parlist1): namelist.append(par['name']) parl = np.append(namelist,'dist') # set width of bars barWidth = 0.30 # Set position of bar on X axis r1 = np.arange(len(parl)) 
plt.bar((r1+barWidth*0),mean_df1,yerr=sd_df1, capsize=2,width=barWidth, label="1ind") plt.bar((r1+barWidth*1),mean_df2,yerr=sd_df2, capsize=2,width=barWidth, label="2ind") plt.xlabel('par', fontweight='bold') plt.xticks([r + barWidth for r in range(len(parl))], parl) plt.legend() plt.show() plt.bar((r1+barWidth*0),mode_df1,width=barWidth, label="1ind") plt.bar((r1+barWidth*1),mode_df2,width=barWidth, label="2ind") plt.xlabel('par', fontweight='bold') plt.xticks([r + barWidth for r in range(len(parl))], parl) plt.legend() plt.show() def calculateSS(ARA,parUsed): #sort ss according to their stabilitz #create stability list of shape : arabinose x steady x X,Y,Z unstable=np.zeros((len(ARA),3,3)) stable=np.zeros((len(ARA),3,3)) oscillation=np.zeros((len(ARA),3,3)) unstable[:]=np.nan stable[:]=np.nan oscillation[:]=np.nan for ai,a in enumerate(ARA): ss=meq.findss(a,parUsed) if len(ss) > 3: print("error: more than 3 steadystates") else: d = b = c=0 # can replace a,b,c by si, but allow to have osccilation on the same level for si,s in enumerate(ss): e=meq.stability(a,parUsed,[s])[0][0] if all(e<0): stable[ai][d]=s d+=1 if any(e>0): pos=e[e>0] if len(pos)==2: if pos[0]-pos[1] == 0: oscillation[ai][b]=s b+=1 else: unstable[ai][c]=s c+=1 else: unstable[ai][c]=s c+=1 return unstable,stable,oscillation #chose parameter def bifurcation(parUsed=None): p,df= load('final','ACDC_X2',parlist) #parUsed=par0 if parUsed == None: parUsed=p[0] ARA=np.logspace(-4.5,-2.,20,base=10) ss=meq.findss(ARA[0],parUsed)[0] #print(ss) init=[ss[0],ss[1],ss[2]] X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init) df_X=pd.DataFrame(X[500:],columns=ARA) sns.heatmap(df_X, cmap="Reds", norm=LogNorm()) plt.show() xss,yss,zss = calculateSScurve(ARA,parUsed) maxX=[] minX=[] maxY=[] minY=[] maxZ=[] minZ=[] # X,Y,Z=meq.model(ARA,parUsed,totaltime=400) delta=10e-5 for i in np.arange(0,len(ARA)): min_x=[np.nan,np.nan,np.nan] max_x=[np.nan,np.nan,np.nan] ss=meq.findss(ARA[i],parUsed) for si,s in enumerate(ss): init=[s[0]+delta,s[1]+delta,s[2]+delta] X,Y,Z=meq.model(ARA,parUsed,totaltime=100,init=init) # print(max(X[200:,i])) max_x[si]=max(X[200:,i]) min_x[si]=min(X[200:,i]) maxX.append(max_x) minX.append(min_x) # minX.append(min(X[200:,i])) maxY.append(max(Y[200:,i])) minY.append(min(Y[200:,i])) maxZ.append(max(Z[200:,i])) minZ.append(min(Z[200:,i])) plt.subplot(3,1,1) plt.plot(ARA,xss,'--o') plt.plot(ARA,maxX,'-b') plt.plot(ARA,minX,'-g') #plt.fill_between(ARA,maxX,minX,alpha=0.2,facecolor='red') plt.yscale("log") plt.xscale("log") plt.subplot(3,1,2) plt.plot(ARA,yss,'--b') # plt.plot(ARA,maxY,'-b') # plt.plot(ARA,minY,'-b') # plt.fill_between(ARA,maxY,minY,alpha=0.2,facecolor='blue') plt.yscale("log") plt.xscale("log") plt.subplot(3,1,3) plt.plot(ARA,zss,'--g') # plt.plot(ARA,maxZ,'-g') # plt.plot(ARA,minZ,'-g') # plt.fill_between(ARA,maxZ,minZ,alpha=0.2,facecolor='green') plt.yscale("log") plt.xscale("log") plt.show() def getlimitcycle(ARA,ssl,par,tt=500): M=np.ones((len(ARA),3,3))*np.nan m=np.ones((len(ARA),3,3))*np.nan delta=10e-5 transient=500 for ai,a in enumerate(ARA): ss=ssl[ai] for si,s in enumerate(ss): if any(np.isnan(s)) == False: init=[s[0]+delta,s[1]+delta,s[2]+delta] X,Y,Z=meq.model([a],par,totaltime=tt,init=init) M[ai,si,0]=max(X[transient:]) M[ai,si,1]=max(Y[transient:]) M[ai,si,2]=max(Z[transient:]) m[ai,si,0]=min(X[transient:]) m[ai,si,1]=min(Y[transient:]) m[ai,si,2]=min(Z[transient:]) max_list=argrelextrema(X[transient:], np.greater) maxValues=X[transient:][max_list] min_list=argrelextrema(X[transient:], np.less) 
minValues=X[transient:][min_list] maximaStability = abs(maxValues[-2]-minValues[-2])-(maxValues[-3]-minValues[-3]) if maximaStability > 0.01: print("limit cycle not achieved for ARA["+str(ai)+"]:" + str(a) + " at st.s:"+ str(s)) return M,m def bifurcation_plot(n,filename): p,df= load(n,filename,parlist) ARA=np.logspace(-4.5,-2.,200,base=10) un,st,osc=calculateSS(ARA,p[1]) M,m=getlimitcycle(ARA,osc,p[1],tt=500) for i,col in enumerate(['r','b','g']): plt.subplot(3,1,i+1) plt.plot(ARA,un[:,:,i],'--'+col) plt.plot(ARA,st[:,:,i],'-'+col) plt.plot(ARA,osc[:,:,i],'--'+col) plt.fill_between(ARA,M[:,0,i],m[:,0,i],alpha=0.2,facecolor=col) plt.fill_between(ARA,M[:,1,i],m[:,1,i],alpha=0.2,facecolor=col) plt.fill_between(ARA,M[:,2,i],m[:,2,i],alpha=0.2,facecolor=col) plt.yscale("log") plt.xscale("log") plt.show() #bifurcation(p[1])
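# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): calculateSS above classifies
# each steady state from the output of meq.stability, treating it as the real
# parts of the Jacobian eigenvalues: all negative -> stable, a pair of equal
# positive entries (a complex-conjugate pair with positive real part) ->
# oscillatory, anything else positive -> unstable. The toy classifier below
# applies the same rule directly to a Jacobian matrix with numpy.
import numpy as np

def classify_steady_state(jacobian, tol=1e-9):
    eig = np.linalg.eigvals(jacobian)
    real = np.real(eig)
    if np.all(real < 0):
        return "stable"
    pos = real[real > 0]
    if len(pos) == 2 and abs(pos[0] - pos[1]) < tol:
        return "oscillation"   # unstable focus: conjugate pair with Re > 0
    return "unstable"

# Example: a 2x2 system with eigenvalues 0.1 +/- 1j -> "oscillation"
# print(classify_steady_state(np.array([[0.1, -1.0], [1.0, 0.1]])))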
# Source: icvara/AC-DC | compareplot.py | Python | 14,082 bytes (github-code)
from pygame import * from random import randrange from math import * from Pong.GameStats import GameStats from Pong.Player.Goal import Goal from Pong.Player.PlayerRacket import PlayerRacket class Ball: MAX_SPEED_Y = 12 SPEED_X = 6 COLOR = (int(255), int(255), int(255)) RADIUS: int = 10 WIN_SCORE = 10 def __init__(self, players): self.velocity = (Ball.SPEED_X, randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y)) self.pos = (int(GameStats.width/2), int(GameStats.height/2)) self.players = players def update_move(self): # if there is collision self.pos = (self.velocity[0] + self.pos[0], self.pos[1] + self.velocity[1]) self.collision_update() if self.pos[0] < -5 or self.pos[0] > 640: self.pos = (320, 320) self.velocity = (Ball.SPEED_X, randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y)) elif self.pos[1] < 0 or self.pos[1] > GameStats.height: self.velocity = (self.velocity[0], -self.velocity[1]) def draw(self, surface): self.update_move() draw.circle(surface, Ball.COLOR, self.pos, Ball.RADIUS) def collision_update(self): col_pos = (0, 0) col_body = None collision = False for p in [self.players[0].racket, self.players[1].racket, self.players[0].goal, self.players[1].goal]: for point in ((self.pos[0] + Ball.RADIUS*cos(theta*0.01), self.pos[1] + Ball.RADIUS*sin(theta*0.01)) for theta in range(0, int(pi*2*100))): if p[0] < point[0] < p[0] + p[2] and \ p[1] < point[1] < p[1] + p[3]: col_pos = point col_body = p collision = True break if collision: break if collision: if type(col_body) is PlayerRacket: self.velocity = (-self.velocity[0], int((col_pos[1] - col_body[1] - col_body[3]/2)/col_body[3]*Ball.MAX_SPEED_Y*2)) elif type(col_body) is Goal: if self.players[0].goal == col_body: if self.players[0].score() == self.WIN_SCORE: self.players[0].reset() self.players[1].reset() if self.players[1].goal == col_body: if self.players[1].score() == self.WIN_SCORE: self.players[0].reset() self.players[1].reset() self.pos = (GameStats.width//2, GameStats.height//2) self.velocity = ((Ball.SPEED_X * ((-1) ** randrange(2))), randrange(-Ball.MAX_SPEED_Y, Ball.MAX_SPEED_Y)) def __getitem__(self, key): return self.pos[key]
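# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): collision_update above tests
# roughly 628 points on the ball's perimeter against each rectangle. The
# standard clamp-based circle/rectangle test below is an equivalent check that
# needs no loop: clamp the circle centre into the rectangle and compare the
# distance to the radius. Rectangles are assumed to be (x, y, width, height),
# matching the p[0]..p[3] indexing used above.
def circle_hits_rect(cx, cy, radius, rect):
    rx, ry, rw, rh = rect[0], rect[1], rect[2], rect[3]
    # closest point of the rectangle to the circle centre
    nearest_x = min(max(cx, rx), rx + rw)
    nearest_y = min(max(cy, ry), ry + rh)
    dx, dy = cx - nearest_x, cy - nearest_y
    return dx * dx + dy * dy <= radius * radius

# Example:
# print(circle_hits_rect(15, 15, 10, (20, 0, 5, 100)))  # True, ball touches the racket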
# Source: dogancanalgul/Pong | ball.py | Python | 2,820 bytes (github-code)
'''----------------------------------------------------------------------------
engine.py
----------------------------------------------------------------------------'''
from engine.ssc.image_ini import *
import numpy as np
# import sunpy.instr.aia


def standard_multitype_ini(observations):
    '''Standard initialization for different kinds of observation.

    The initialization contains rotation, limb darkening correction,
    Bz estimation and removal of the out-of-limb region.

    Parameter
    ---------
    observations - Sunpy map object, it can contain multiple images.

    Return
    ------
    observations - Sunpy map object with modified data'''

    # Create a new list for the initialized observations
    initialized_observations = []

    for obs in observations:

        if obs.detector == 'HMI':

            # Replace np.nan-s with zero for rotating
            obs._data = np.nan_to_num(obs.data)

            # Rotate the observations
            obs = obs.rotate()

            # Limb darkening correction, only for the HMI white light image
            if obs.measurement == 'continuum':
                obs = dark_limb.limb_darkening_correct(obs, limb_cut=0.99)

            # Longitudinal magnetic field to Bz estimation
            if obs.measurement == 'magnetogram':
                obs = blbz.LOS2Bz(obs)

            # Cut the limb and replace the out-of-limb region with np.nan
            obs = cut.solar_limb(obs)

        # if obs.detector == 'AIA':
            # Processes a level 1 AIAMap into a level 1.5 AIAMap
            # obs = sunpy.instr.aia.aiaprep(obs)

        # Append the new maps
        initialized_observations.append(obs)

    # Delete raw observations
    del observations

    return initialized_observations
# Source: gyengen/SheffieldSolarCatalog | engine/initialisation.py | Python | 1,753 bytes (github-code)
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 12:54:38 2016

@author: Kylin
"""
import math
import quyu          # region/containment helpers ("quyu" = "region")
import erfenbijin    # bisection helpers ("erfenbijin" = "bisection")
import pylab as pl

a = 200                   # half-width of the square billiard [-a, a] x [-a, a]
Rx = 10                   # initial position
Ry = 20
V0 = 100                  # initial speed
theta = math.pi/5
dt = 0.1
Vx = V0*math.cos(theta)
Vy = V0*math.sin(theta)
R_x = []
V_x = []
i = 0

while 1:
    Rx = Rx + Vx*dt
    Ry = Ry + Vy*dt
    # Record (x, Vx) each time the trajectory crosses y = 0 (phase-space section)
    if Ry*(Ry - Vy*dt) < 0:
        k = (Ry - 0)/(0 - (Ry - Vy*dt))
        x0 = ((1 + k)*Rx - k*Vx*dt)/(1 + k)
        R_x += [x0]
        V_x += [Vx]
    # inZhengfangxing presumably returns 1 inside the square, 0 on the boundary, -1 outside
    if quyu.inZhengfangxing(Rx, Ry, a) == 1 \
       or quyu.inZhengfangxing(Rx, Ry, a) == 0:
        continue
    if quyu.inZhengfangxing(Rx, Ry, a) == -1:
        # The step went outside: bisect back onto the square boundary
        x1 = Rx - Vx*dt
        y1 = Ry - Vy*dt
        x2 = Rx
        y2 = Ry
        t = erfenbijin.Zhengfangxing_erFenbijin(x1, y1, x2, y2, 0, a)
        Rx = t[0]
        Ry = t[1]
        # continue
    if quyu.inZhengfangxing(Rx, Ry, a) == 0:
        # Reflect the velocity at the walls and corners of the square
        if (Rx == a or Rx == -a) and Ry > -a and Ry < a:
            Vx = -Vx
            Vy = Vy
        if (Ry == a or Ry == -a) and Rx > -a and Rx < a:
            Vx = Vx
            Vy = -Vy
        if (Rx == a and Ry == a) or (Rx == -a and Ry == a) or (Rx == a and Ry == -a) or (Rx == -a and Ry == -a):
            Vx = -Vx
            Vy = -Vy
    i += 1
    print(i)
    if i > 10000:
        break

pl.plot(R_x, V_x, "o", label="Vx-Rx")
pl.title(u"正方形".encode("gb2312"))   # "正方形" = "square"
pl.xlabel('Rx')
pl.ylabel('Vx')
pl.legend()
pl.show()
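# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the imported helper
# erfenbijin.Zhengfangxing_erFenbijin ("bisection for the square") is not shown
# here; presumably it bisects between the last inside point (x1, y1) and the
# overshooting point (x2, y2) until it lands on the boundary |x| = a or |y| = a.
# A generic version of that idea:
def bisect_to_square_boundary(x1, y1, x2, y2, a, iters=50):
    """(x1, y1) is inside the square [-a, a]^2, (x2, y2) is outside."""
    def inside(x, y):
        return abs(x) <= a and abs(y) <= a
    for _ in range(iters):
        xm, ym = (x1 + x2) / 2.0, (y1 + y2) / 2.0
        if inside(xm, ym):
            x1, y1 = xm, ym
        else:
            x2, y2 = xm, ym
    return x1, y1   # point numerically on the boundary, still inside

# Example:
# print(bisect_to_square_boundary(190.0, 0.0, 210.0, 0.0, 200))  # ~(200.0, 0.0)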
# Source: 52kylin/compuational_physics_N2014301020034 | exercise_09_new/code/zfxvx.py | Python | 1,459 bytes (github-code)
""" 文件名: Code/Chapter05/C01_ConfigManage/E02_Config.py 创建时间: 2023/2/26 3:47 下午 作 者: @空字符 公众号: @月来客栈 知 乎: @月来客栈 https://www.zhihu.com/people/the_lastest """ import os class ModelConfig(object): def __init__(self, train_file_path=os.path.join('data', 'train.txt'), val_file_path=os.path.join('data', 'val.txt'), test_file_path=os.path.join('data', 'test.txt'), split_sep='_!_', is_sample_shuffle=True, batch_size=16, learning_rate=3.5e-5, max_sen_len=None, num_labels=3, epochs=5): self.train_file_path = train_file_path self.val_file_path = val_file_path self.test_file_path = test_file_path self.split_sep = split_sep self.is_sample_shuffle = is_sample_shuffle self.batch_size = batch_size self.learning_rate = learning_rate self.max_sen_len = max_sen_len self.num_labels = num_labels self.epochs = epochs # def train(config): dataset = get_dataset(config) model = get_mode(config) if __name__ == '__main__': config = ModelConfig(epochs=10) print(f"epochs = {config.epochs}") # train(config)
# Source: moon-hotel/DeepLearningWithMe | Code/Chapter05/C01_ConfigManage/E02_Config.py | Python | 1,326 bytes (github-code)
import const import sys, os import string import random QUESTION_TOOL='What are the tools used in the attack?' QUESTION_GROUP='Who is the attack group?' INPUT_FILE='input/sample_attack_report_raw.txt' TRAIN_RATE=0.8 VUL_RATE=0.1 LABEL_TRAIN='train' LABEL_VAL='dev' LABEL_TEST='test' SENTENSE_DELIMETER=". " WORD_DELIMETER=" " LAVEL_GROUP='B-AT' LAVEL_I_GROUP='I-AT' LAVEL_TOOL='B-TL' LAVEL_I_TOOL='I-TL' LAVEL_SEC='B-SC' LAVEL_I_SEC='I-SC' LAVEL_COM='B-CM' LAVEL_I_COM='I-CM' LAVEL_OTHER='O' DATASET_DELIMETER="\t" TRAIN_FILE='train.txt' VAL_FILE='dev.txt' TEST_FILE='test.txt' MAX_WORD_NUM=200 MAX_WORD=1000 NUM_SENTENSE_PER_ROW=100 LONG_SENTENSE='long.txt' O_RATE=1 EXCLUSIVE_LIST=['at'] LEN_RANDOM=10 alldataset={} def get_tools(): tools=[] with open(const.OUT_TOOL_FILE, 'r') as file: for row in file: tool = row.replace(const.NEWLINE, "") #tool = tool.lower() tools.append(tool) return tools def get_groups(): groups=[] with open(const.OUT_GROUP_FILE, 'r') as file: for row in file: group = row.replace(const.NEWLINE, "") #group=group.lower() groups.append(group) return groups def get_sectors(): sectors=[] with open(const.OUT_SECTOR_FILE, 'r') as file: for row in file: sector = row.replace(const.NEWLINE, "") #sector=sector.lower() sectors.append(sector) return sectors def get_companies(): companies=[] with open(const.OUT_COMPANY_FILE, 'r') as file: for row in file: company = row.replace(const.NEWLINE, "") #company=company.lower() companies.append(company) return companies def random_str(word): dat = string.digits + string.ascii_lowercase + string.ascii_uppercase return ''.join([random.choice(dat) for i in range(len(word))]).lower() def get_random_TOOL(start,end): index=random.randint(start,end) tool=tools[index] name=tool.split(" ")[0] return name def get_random_TA(start,end): index=random.randint(start,end) ta_name=groups[index] name = ta_name.split(" ")[0] return name def create_dataset(mode,num_dataset, start_a, end_a, start_t, end_t): cnt=0 data=[] data_O=[] data_tag = [] if mode == LABEL_TRAIN: data=lines[:num_train-1] elif mode==LABEL_VAL: data=lines[num_train:num_train+num_val] else: data = lines[num_train+num_val:] for row in data: print("cnt: "+str(cnt)) if cnt>num_dataset: print("Exceed "+str(num_data)) return sentenses = row.split(SENTENSE_DELIMETER) #print(str(len(sentenses))) for sentense in sentenses: words= sentense.split(WORD_DELIMETER) if len(words) >=MAX_WORD_NUM: # with open(LONG_SENTENSE, "a", encoding='utf8') as out_sentense: # out_sentense.write(sentense + const.NEWLINE) continue len_word=0 for word in words: len_word=len_word+len(word) if len_word >= MAX_WORD: continue prev='' prev_org='' dataset=[] index=0 for word in words: lavel = LAVEL_OTHER word=word.strip() tmp_word = word # groups if tmp_word in groups: lavel=LAVEL_GROUP elif prev+WORD_DELIMETER+tmp_word in groups: lavel = LAVEL_I_GROUP prev_org = get_random_TA(start_a, end_a) dataset[index-1]=prev_org + DATASET_DELIMETER + LAVEL_GROUP + const.NEWLINE # tools elif tmp_word in tools and tmp_word.lower() not in EXCLUSIVE_LIST: lavel=LAVEL_TOOL elif prev + WORD_DELIMETER + tmp_word in tools: lavel = LAVEL_I_TOOL prev_org = get_random_TOOL(start_t,end_t) dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_TOOL + const.NEWLINE # # sectors # elif tmp_word in sectors: # lavel = LAVEL_SEC # # elif prev + WORD_DELIMETER + tmp_word in sectors: # lavel = LAVEL_I_SEC # dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_SEC + const.NEWLINE # # # companies # elif tmp_word in companies: # lavel = LAVEL_COM # # elif prev + 
WORD_DELIMETER + tmp_word in companies: # lavel = LAVEL_I_COM # dataset[index - 1] = prev_org + DATASET_DELIMETER + LAVEL_COM + const.NEWLINE if lavel ==LAVEL_GROUP or lavel==LAVEL_I_GROUP: word=get_random_TA(start_a, end_a) word=word elif lavel ==LAVEL_TOOL or lavel==LAVEL_I_TOOL: word=get_random_TOOL(start_t,end_t) word = word dataset.append(word + DATASET_DELIMETER + lavel + const.NEWLINE) prev=tmp_word prev_org=word index=index+1 num_data=0 for item in dataset: label=item.split(DATASET_DELIMETER)[1].strip() if label!=LAVEL_OTHER: num_data=num_data+1 if num_data == 0: data_O.append(dataset) else: data_tag.append(dataset) cnt = cnt + 1 O_num = len(data_O) max_O_num = int(O_num* O_RATE) alldataset[mode]=data_tag+data_O[:max_O_num] return(mode) with open(INPUT_FILE, 'r') as file: lines = file.readlines() context=len(lines) print("total context:" +str(context)) if len(sys.argv)>1: context = int(sys.argv[1]) num_train=round(context*TRAIN_RATE) num_val=round(context*VUL_RATE) num_test=context-num_train-num_val print("num_train:" +str(num_train)) print("num_val:" +str(num_val)) print("num_test:" +str(num_test)) tools=get_tools() groups=get_groups() # sectors=get_sectors() # companies=get_companies() train_ta_end=round(len(groups)*TRAIN_RATE) dev_ta_end=train_ta_end+round(len(groups)*VUL_RATE) test_ta_end=len(groups)-1 train_tl_end=round(len(tools)*TRAIN_RATE) dev_tl_end=train_tl_end+round(len(tools)*VUL_RATE) test_tl_end=len(tools)-1 if os.path.exists(TRAIN_FILE): os.remove(TRAIN_FILE) if os.path.exists(VAL_FILE): os.remove(VAL_FILE) if os.path.exists(TEST_FILE): os.remove(TEST_FILE) if os.path.exists(LONG_SENTENSE): os.remove(LONG_SENTENSE) create_dataset(LABEL_TRAIN, num_train,0,train_ta_end,0,train_tl_end) create_dataset(LABEL_VAL, num_val,train_ta_end+1,dev_ta_end,train_tl_end+1,dev_tl_end) create_dataset(LABEL_TEST, num_test,dev_ta_end+1,test_ta_end,dev_tl_end+1,test_tl_end) with open(LABEL_TRAIN + '.txt', "a", encoding='utf8') as out: for dataset in alldataset[LABEL_TRAIN]: out.writelines(dataset) out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE) with open(LABEL_VAL + '.txt', "a", encoding='utf8') as out: for dataset in alldataset[LABEL_VAL]: out.writelines(dataset) out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE) with open(LABEL_TEST + '.txt', "a", encoding='utf8') as out: for dataset in alldataset[LABEL_TEST]: out.writelines(dataset) out.write('.' + DATASET_DELIMETER + LAVEL_OTHER + const.NEWLINE + const.NEWLINE)
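# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the script above writes a
# CoNLL-style file, one "token<TAB>label" pair per line with B-AT/I-AT for
# attack groups, B-TL/I-TL for tools and O otherwise, plus a ".<TAB>O" and a
# blank line closing each sentence. The tokens in the example are made up.
def format_conll(tokens_and_labels, delimiter="\t", newline="\n"):
    lines = [f"{tok}{delimiter}{lab}{newline}" for tok, lab in tokens_and_labels]
    lines.append(f".{delimiter}O{newline}{newline}")
    return "".join(lines)

# Example (hypothetical sentence, hypothetical names):
# print(format_conll([("The", "O"), ("group", "O"), ("APT99", "B-AT"),
#                     ("used", "O"), ("FakeTool", "B-TL")]), end="")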
# Source: gamzattirev/Ahogrammer | create_dataset.py | Python | 7,721 bytes (github-code)
from random import randint

y = int(randint(1, 10))
for i in range(3):
    x = int(input("猜数字:\n"))   # "Guess the number:"
    if x > y:
        print("大了")             # "Too big"
    elif x < y:
        print("小了")             # "Too small"
    else:
        print("猜对了")           # "Correct!"
        break
print("Game over!")
# Source: wuge-1996/Python-Exercise | Exercise 39.py | Python | 247 bytes (github-code)
# The else block right after a for / while loop runs only when the loop
# does NOT end with a break statement.

"""
for i in range(1, 5):
    print(i)
else:
    print("No break / this runs because there is no break")
"""

# Program to check if an array consists
# of even numbers
"""
def evenumbers(lista):
    for l in lista:
        if l % 2 == 0:
            print("There are even numbers in the list")
            break
    else:
        print("There are no even numbers in the list")

print("Lista 1 :")
evenumbers([5, 3, 6])
print("Lista 2 :")
evenumbers([1, 3, 5])
"""

count = 4
while count < 1:
    count += 1
    print(count)
else:
    print("no break")
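# --------------------------------------------------------------------------
# Small runnable demo (not part of the original file) of the rule stated above:
# the else clause runs only when the loop finishes without hitting break.
for n in [1, 3, 5]:
    if n % 2 == 0:
        print("found an even number:", n)
        break
else:
    print("no even number found")           # printed: the loop never breaks

for n in [1, 4, 5]:
    if n % 2 == 0:
        print("found an even number:", n)   # printed: 4 triggers the break
        break
else:
    print("no even number found")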
# Source: codekacode/Exercisespython | Elsefor.py | Python | 682 bytes (github-code)
'''
Problem Statement

Your company has a big conference coming up and needs to book conference rooms
in a convention center. To help the company save budget, we want to book as few
conference rooms as possible given a list of meeting schedules that contains
only the starting and ending time of each meeting. Write a program that helps
figure out the minimum number of conference rooms needed.

Example:
[(2,7)] -> Output: 1
[(0,30), (5,10), (15,20), (21,22), (21,28)]
    -> Explanation: Room1: (0,30); Room2: (5,10), (15,20); (21,22) and (21,28)
       overlap each other and (0,30), so a third room is needed -> Output: 3

examples
    (0,30), (5,10), (15,20), (21,22), (21,28)
    (0,30), (0,10), (5,15), (11,20), (17,25), (21,30)

assumptions

approaches
    1) (0,30), (5,22), (21,28)
    2) (0,30), (0,10), (5,15), (11,20), (17,25), (21,30)
       0 1 2 3 4 5
       0 30
       count: 1
    create a res array; for any new interval, look in res for a place where the
    interval has no intersection. this space defines a room!

tradeoffs
    this appears to be the only way
'''

from typing import List, Tuple


def roomcount(times: List[Tuple[int, int]]) -> int:
    '''
    s1------e1
         s2-------e2
    '''
    def intersects(start1, end1, start2, end2):
        return min(end1, end2) > max(start1, start2)

    def no_intersects(lis):
        for int_ in lis:
            if intersects(*int_, start, end):
                return False
        return True

    rooms = []
    for start, end in times:
        for lis in rooms:
            if no_intersects(lis):
                lis.append((start, end))
                break
        else:
            rooms.append([(start, end)])
    return len(rooms)


ints = [(2, 7)]  # -> Output: 1
print(roomcount(ints))

ints = [(0, 30), (5, 10), (15, 20), (21, 22), (21, 28)]  # 3
print(roomcount(ints))

ints = [(0, 30), (5, 10), (15, 20), (21, 22), (22, 28)]  # 2
print(roomcount(ints))
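# --------------------------------------------------------------------------
# Alternative sketch (not part of the original file): the usual way to get the
# minimum number of rooms is to sort meetings by start time and keep a min-heap
# of end times; a meeting reuses a room when the earliest-ending room is already
# free. This is the standard sweep/heap solution, shown here for comparison.
import heapq
from typing import List, Tuple

def roomcount_heap(times: List[Tuple[int, int]]) -> int:
    end_times = []                              # min-heap of end times of occupied rooms
    for start, end in sorted(times):
        if end_times and end_times[0] <= start:
            heapq.heapreplace(end_times, end)   # reuse the room that frees earliest
        else:
            heapq.heappush(end_times, end)      # need a new room
    return len(end_times)

print(roomcount_heap([(0, 30), (5, 10), (15, 20), (21, 22), (21, 28)]))  # 3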
# Source: soji-omiwade/cs | dsa/before_rubrik/minimum_rooms.py | Python | 1,937 bytes (github-code)
from typing import Dict, List, Optional, Tuple, Union import numpy as np from rl_nav import constants from rl_nav.environments import wrapper try: import cv2 import matplotlib from matplotlib import cm from matplotlib import pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable except ModuleNotFoundError: raise AssertionError( "To use visualisation wrapper, further package requirements " "need to be satisfied. Please consult README." ) class VisualisationEnv(wrapper.Wrapper): COLORMAP = cm.get_cmap("plasma") NORMALISE = False def __init__(self, env): super().__init__(env=env) def render( self, save_path: Optional[str] = None, dpi: Optional[int] = 60, format: str = "state", ) -> None: """Method to render environment. Args: save_path: optional path to which to save image. dpi: optional pixel. format: state of environment to render. """ if format == constants.STATE: assert ( self._env.active ), "To render map with state, environment must be active." "call reset_environment() to reset environment and make it active." "Else render stationary environment skeleton using format='stationary'" if save_path: fig = plt.figure() plt.imshow( self._env._env_skeleton( rewards=format, agent=format, ), origin="lower", ) fig.savefig(save_path, dpi=dpi) else: plt.imshow( self._env._env_skeleton( rewards=format, agent=format, ), origin="lower", ) def visualise_episode_history( self, save_path: str, history: Union[str, List[np.ndarray]] = "train" ) -> None: """Produce video of episode history. Args: save_path: name of file to be saved. history: "train", "test" to plot train or test history, else provide an independent history. """ if isinstance(history, str): if history == constants.TRAIN: history = self._env.train_episode_history elif history == constants.TEST: history = self._env.test_episode_history elif history == constants.TRAIN_PARTIAL: history = self._env.train_episode_partial_history elif history == constants.TEST_PARTIAL: history = self._env.test_episode_partial_history SCALING = 20 FPS = 30 map_shape = history[0].shape frameSize = (SCALING * map_shape[1], SCALING * map_shape[0]) out = cv2.VideoWriter( filename=save_path, fourcc=cv2.VideoWriter_fourcc("m", "p", "4", "v"), fps=FPS, frameSize=frameSize, ) for frame in history: bgr_frame = frame[..., ::-1].copy() flipped_frame = np.flip(bgr_frame, 0) scaled_up_frame = np.kron(flipped_frame, np.ones((SCALING, SCALING, 1))) out.write((scaled_up_frame * 255).astype(np.uint8)) out.release() def _plot_normalised_heatmap_over_env( self, heatmap: Dict[Tuple[int, int], float], save_name: str ): split_save_name = save_name.split(".pdf")[0] save_name = f"{split_save_name}_normalised.pdf" environment_map = self._env._env_skeleton( rewards=None, agent=None, ) all_values = list(heatmap.values()) current_max_value = np.max(all_values) current_min_value = np.min(all_values) for position, value in heatmap.items(): # remove alpha from rgba in colormap return # normalise value for color mapping environment_map[position[::-1]] = self.COLORMAP( (value - current_min_value) / (current_max_value - current_min_value) )[:-1] fig = plt.figure() plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP) plt.colorbar() fig.savefig(save_name, dpi=60) plt.close() def _plot_unnormalised_heatmap_over_env( self, heatmap: Dict[Tuple[int, int], float], save_name: str ): environment_map = self._env._env_skeleton( rewards=None, agent=None, ) for position, value in heatmap.items(): # remove alpha from rgba in colormap return environment_map[position[::-1]] = 
self.COLORMAP(value)[:-1] fig = plt.figure() plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP) plt.colorbar() fig.savefig(save_name, dpi=60) plt.close() def plot_heatmap_over_env( self, heatmap: Dict[Tuple[int, int], float], save_name: str, ) -> None: """plot quantities over top of environmen (e.g. value function) Args: heatmap: data to plot; dictionary of states (keys) and quantities (values). fig: figure on which to plot. ax: axis on which to plot. save_name: path to which to save plot. """ self._plot_unnormalised_heatmap_over_env(heatmap=heatmap, save_name=save_name) self._plot_normalised_heatmap_over_env(heatmap=heatmap, save_name=save_name) def plot_numbered_values_over_env( self, values: Dict[Tuple[int], np.ndarray], save_name: str ): fig = plt.figure() environment_map = self._env._env_skeleton( rewards=None, agent=None, ) plt.imshow(environment_map, origin="lower", cmap=self.COLORMAP) all_states = list(values.keys()) for state, action_values in values.items(): for i, action_value in enumerate(action_values): if all_states[i] != state: xytext = np.array(state) + 0.2 * ( np.array(all_states[i]) - np.array(state) ) plt.annotate( f"{i}: {round(action_value, 2)}", xy=state, xytext=xytext, arrowprops={ "headlength": 2, "headwidth": 2, "width": 0.5, "linewidth": 0.1, }, color="y", size=5, ) else: plt.annotate( i, xy=state, color="g", size=5, ) fig.savefig(save_name, dpi=60) plt.close()
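# --------------------------------------------------------------------------
# Hedged sketch (not part of the original file): visualise_episode_history above
# upscales each frame with np.kron and reverses the channel order for OpenCV
# (which expects BGR). The standalone snippet below shows just those two array
# tricks on a tiny fake frame.
import numpy as np

frame = np.zeros((2, 3, 3), dtype=float)   # 2x3 RGB image
frame[0, 0] = [1.0, 0.5, 0.0]              # one orange-ish pixel

bgr = frame[..., ::-1]                     # RGB -> BGR by reversing the last axis
scaled = np.kron(bgr, np.ones((4, 4, 1)))  # each pixel becomes a 4x4 block

print(bgr[0, 0])       # [0.  0.5 1. ]
print(scaled.shape)    # (8, 12, 3)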
philshams/Euclidean_Gridworld_RL | rl_nav/environments/visualisation_env.py | visualisation_env.py | py | 6,969 | python | en | code | 1 | github-code | 6 | 12423871357
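_plot_normalised_heatmap_over_env maps each state's value through the plasma colormap after min-max normalisation, drops the alpha channel, and writes the RGB triple into the skeleton image indexed by the reversed (y, x) position. A small sketch of that colour-mapping step in isolation; the value dictionary and the blank map below are made up, whereas in the wrapper they come from the environment:

import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt

COLORMAP = cm.plasma

# hypothetical value function over a 7x5 grid: {(x, y): value}
heatmap = {(x, y): float(x + y) for x in range(7) for y in range(5)}
environment_map = np.zeros((5, 7, 3))  # blank RGB "skeleton"

values = np.array(list(heatmap.values()))
vmin, vmax = values.min(), values.max()

for (x, y), value in heatmap.items():
    # the colormap returns RGBA in [0, 1]; drop alpha and index the image as [row, col]
    environment_map[y, x] = COLORMAP((value - vmin) / (vmax - vmin))[:-1]

fig = plt.figure()
plt.imshow(environment_map, origin="lower")
fig.savefig("value_heatmap_normalised.pdf", dpi=60)
plt.close(fig)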
__author__ = "Vanessa Sochat, Alec Scott" __copyright__ = "Copyright 2021-2023, Vanessa Sochat and Alec Scott" __license__ = "Apache-2.0" import json import os import re import shlex import subprocess import pakages.builders.spack.cache as spack_cache import pakages.client import pakages.oras import pakages.utils from pakages.logger import logger class SpackClient(pakages.client.PakagesClient): """ Pakages has a main controller for interacting with pakages. """ def parse_package_request(self, packages): """ Parse the packages and repo (if any) from it. This is shared between install and build """ # By defualt, assume not adding a repository repo = None if not isinstance(packages, list): packages = shlex.split(packages) # Case 1: we have an install directed at the present working directory if packages and packages[0] == ".": repo = os.getcwd() packages.pop(0) # If we have a path (akin to the first) if packages and os.path.exists(packages[0]): repo = packages.pop(0) # OR if we have a github URI TODO, can clone here if packages and re.search("(http|https)://github.com", packages[0]): repo = packages.pop(0) # If we don't have packages and we have a repo, derive from PWD if repo and not packages: for path in pakages.utils.recursive_find(repo, "package.py"): packages.append(os.path.basename(os.path.dirname(path))) # Finally, add the repository if repo: self.add_repository(repo) return packages def list_installed(self): """ List installed packages """ command = ["spack", "find"] for line in pakages.utils.stream_command(command): print(line.strip("\n")) command = ["spack", "find", "--json"] result = pakages.utils.run_command(command) return json.loads(result["message"]) def build(self, packages, cache_dir=None, key=None, **kwargs): """ Build a package into a cache """ packages = self.parse_packages(packages) # Prepare a cache directory cache = spack_cache.BuildCache( spec_name=packages, cache_dir=cache_dir or self.settings.cache_dir, username=self.settings.username, email=self.settings.email, settings=self.settings, ) # Install all packages self._install(packages) cache.create(packages, key=key) # Push function is on cache, if desired return cache def parse_packages(self, packages): """ Helper function to ensure we return consistent names. """ packages = self.parse_package_request(packages) if isinstance(packages, list): packages = packages[0] if " " in packages: logger.exit("We currently only support one package for build.") logger.info(f"Preparing package {packages}") return packages def add_repository(self, path): """ Add a repository. Given a path that exists, add the repository to the underlying spack. If you need to add a GitHub uri, create a pakages.repo.PakRepo first. """ try: command = ["spack", "repo", "add", path] for line in pakages.utils.stream_command(command): logger.info(line.strip("\n")) except subprocess.CalledProcessError as e: if "Repository is already registered" in e.output: pass else: raise e def download_cache(self, target, download_dir=None): """ Download a target to a cache download directory """ download_dir = download_dir or pakages.utils.get_tmpdir() reg = pakages.oras.get_oras_client() # This will error if not successful, result is a list of files reg.pull(target=target, outdir=download_dir) return download_dir def install(self, packages, **kwargs): """ Install one or more packages. 
""" packages = self.parse_packages(packages) use_cache = kwargs.get("use_cache", False) if use_cache: cache_dir = self.download_cache(use_cache) cache = spack_cache.BuildCache( packages, cache_dir=cache_dir, settings=self.settings ) # Cache is named after target, this is a filesystem mirror cache.add_as_mirror(re.sub("(-|:|/)", "-", use_cache)) # Prepare install command with or without cache command = ["spack", "install"] if use_cache: command.append("--use-cache") if isinstance(packages, list): command.append(" ".join(packages)) else: command.append(packages) # Install packages using system spack - we aren't responsible for this working for line in pakages.utils.stream_command(command): logger.info(line.strip("\n")) def _install(self, packages): """ Install one or more packages. This eventually needs to take into account using the GitHub packages bulid cache """ # Install packages using system spack - we aren't responsible for this working for line in pakages.utils.stream_command(["spack", "install", packages]): logger.info(line.strip("\n")) def uninstall(self, packages): """ Uninstall a spack package """ for line in pakages.utils.stream_command(["spack", "uninstall", packages]): logger.info(line.strip("\n"))
syspack/pakages | pakages/builders/spack/client.py | client.py | py | 5,794 | python | en | code | 2 | github-code | 6 | 13914723162
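In install, the optional build cache is pulled with ORAS, added as a filesystem mirror named after the sanitised target string, and only then is the spack install command line assembled. The two string-handling pieces are small enough to show on their own; the OCI target below is a hypothetical example, not one taken from the source:

import re

def mirror_name(target):
    # collapse ':' and '/' to dashes, mirroring the client's re.sub on the cache target
    return re.sub("(-|:|/)", "-", target)

def install_command(packages, use_cache=False):
    command = ["spack", "install"]
    if use_cache:
        command.append("--use-cache")
    if isinstance(packages, list):
        command.append(" ".join(packages))
    else:
        command.append(packages)
    return command

# hypothetical cache target; the real one is whatever use_cache points at
print(mirror_name("ghcr.io/syspack/cache:latest"))  # ghcr.io-syspack-cache-latest
print(install_command("zlib", use_cache=True))      # ['spack', 'install', '--use-cache', 'zlib']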
import sys import oneflow as flow import oneflow.typing as tp import argparse import numpy as np import os import shutil import json from typing import Tuple from textcnn import TextCNN sys.path.append("../..") from text_classification.utils import pad_sequences, load_imdb_data parser = argparse.ArgumentParser() parser.add_argument('--ksize_list', type=str, default='2,3,4,5') parser.add_argument('--n_filters', type=int, default=100) parser.add_argument('--emb_dim', type=int, default=100) parser.add_argument('--dropout', type=float, default=0.5) parser.add_argument('--lr', type=float, default=1e-4) parser.add_argument('--sequence_length', type=int, default=150) parser.add_argument('--batch_size', type=int, default=32) parser.add_argument('--model_load_dir', type=str, default='') parser.add_argument('--model_save_every_n_iter', type=int, default=1000) parser.add_argument('--n_steps', type=int, default=10000) parser.add_argument('--n_epochs', type=int, default=15) parser.add_argument('--model_save_dir', type=str, default='./best_model') args = parser.parse_args() assert ',' in args.ksize_list args.ksize_list = [int(n) for n in args.ksize_list.split(',')] args.emb_num = 50000 args.n_classes = 2 model = TextCNN( args.emb_num, args.emb_dim, ksize_list=args.ksize_list, n_filters_list=[args.n_filters] * len(args.ksize_list), n_classes=args.n_classes, dropout=args.dropout) def get_train_config(): config = flow.function_config() config.default_data_type(flow.float) return config def get_eval_config(): config = flow.function_config() config.default_data_type(flow.float) return config @flow.global_function('train', get_train_config()) def train_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32), label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32) ) -> tp.Numpy: with flow.scope.placement("gpu", "0:0"): logits = model.get_logits(text, is_train=True) loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name="softmax_loss") lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [args.lr]) flow.optimizer.Adam(lr_scheduler).minimize(loss) return loss @flow.global_function('predict', get_eval_config()) def eval_job(text: tp.Numpy.Placeholder((args.batch_size, args.sequence_length), dtype=flow.int32), label: tp.Numpy.Placeholder((args.batch_size,), dtype=flow.int32) ) -> Tuple[tp.Numpy, tp.Numpy]: with flow.scope.placement("gpu", "0:0"): logits = model.get_logits(text, is_train=False) loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, logits, name="softmax_loss") return label, logits def suffle_batch(data, label, batch_size): permu = np.random.permutation(len(data)) data, label = data[permu], label[permu] batch_n = len(data) // batch_size x_batch = np.array([data[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32) y_batch = np.array([label[i * batch_size:i * batch_size + batch_size] for i in range(batch_n)], dtype=np.int32) return x_batch, y_batch def acc(labels, logits, g): predictions = np.argmax(logits, 1) right_count = np.sum(predictions == labels) g["total"] += labels.shape[0] g["correct"] += right_count def train(checkpoint): path = '../imdb' (train_data, train_labels), (test_data, test_labels) = load_imdb_data(path) with open(os.path.join(path, 'word_index.json')) as f: word_index = json.load(f) word_index = {k: (v + 2) for k, v in word_index.items()} word_index["<PAD>"] = 0 word_index["<UNK>"] = 1 train_data = pad_sequences(train_data, value=word_index["<PAD>"], padding='post', 
maxlen=args.sequence_length) test_data = pad_sequences(test_data, value=word_index["<PAD>"], padding='post', maxlen=args.sequence_length) best_accuracy = 0.0 best_epoch = 0 for epoch in range(1, args.n_epochs + 1): print("[Epoch:{}]".format(epoch)) data, label = suffle_batch(train_data, train_labels, args.batch_size) for i, (texts, labels) in enumerate(zip(data, label)): loss = train_job(texts, labels).mean() if i % 20 == 0: print(loss) data, label = suffle_batch(test_data, test_labels, args.batch_size) g = {"correct": 0, "total": 0} for i, (texts, labels) in enumerate(zip(data, label)): labels, logits = eval_job(texts, labels) acc(labels, logits, g) accuracy = g["correct"] * 100 / g["total"] print("[Epoch:{0:d} ] accuracy: {1:.1f}%".format(epoch, accuracy)) if accuracy > best_accuracy: best_accuracy = accuracy best_epoch = epoch if not os.path.exists(args.model_save_dir): os.mkdir(args.model_save_dir) else: shutil.rmtree(args.model_save_dir) assert not os.path.exists(args.model_save_dir) os.mkdir(args.model_save_dir) print("Epoch:{} save best model.".format(best_epoch)) checkpoint.save(args.model_save_dir) print("Epoch:{} get best accuracy:{}".format(best_epoch, best_accuracy)) if __name__ == '__main__': checkpoint = flow.train.CheckPoint() checkpoint.init() train(checkpoint)
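The training loop's suffle_batch helper permutes the data and reshapes it into fixed-size batches (dropping the incomplete final batch), and acc keeps a running correct/total count across evaluation batches. Both are plain NumPy, so they can be sanity-checked away from OneFlow; the toy arrays below are made up:

import numpy as np

def shuffle_batch(data, label, batch_size):
    permu = np.random.permutation(len(data))
    data, label = data[permu], label[permu]
    batch_n = len(data) // batch_size  # the incomplete final batch is dropped
    x = np.array([data[i * batch_size:(i + 1) * batch_size] for i in range(batch_n)], dtype=np.int32)
    y = np.array([label[i * batch_size:(i + 1) * batch_size] for i in range(batch_n)], dtype=np.int32)
    return x, y

def accumulate_accuracy(labels, logits, g):
    predictions = np.argmax(logits, 1)
    g["total"] += labels.shape[0]
    g["correct"] += np.sum(predictions == labels)

# toy check: 100 samples, sequence length 5, vocabulary of 50 tokens, 2 classes
data = np.random.randint(0, 50, size=(100, 5))
label = np.random.randint(0, 2, size=100)
x_batch, y_batch = shuffle_batch(data, label, batch_size=32)
print(x_batch.shape, y_batch.shape)  # (3, 32, 5) (3, 32)

g = {"correct": 0, "total": 0}
accumulate_accuracy(y_batch[0], np.random.rand(32, 2), g)
print(g["correct"] / g["total"])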
Oneflow-Inc/oneflow_nlp_model | text_classification/textcnn/train_textcnn.py | train_textcnn.py | py | 5,411 | python | en | code | 0 | github-code | 6 | 8246901300
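The script above pads both splits with pad_sequences(..., value=word_index["<PAD>"], padding='post', maxlen=args.sequence_length) from text_classification.utils. Assuming Keras-style semantics (pad and truncate to a fixed length), a minimal NumPy stand-in behaves as follows; the repository's own helper may differ in details such as the truncation side:

import numpy as np

def pad_sequences(sequences, value=0, padding="post", maxlen=None):
    # minimal stand-in: pad (or truncate) every sequence to maxlen
    maxlen = maxlen or max(len(s) for s in sequences)
    out = np.full((len(sequences), maxlen), value, dtype=np.int32)
    for i, seq in enumerate(sequences):
        seq = np.asarray(seq[:maxlen], dtype=np.int32)
        if padding == "post":
            out[i, :len(seq)] = seq
        else:  # "pre"
            out[i, maxlen - len(seq):] = seq
    return out

print(pad_sequences([[1, 2, 3], [4, 5]], value=0, padding="post", maxlen=4))
# [[1 2 3 0]
#  [4 5 0 0]]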
""" Module containing the rheologies, fault setup, and ODE cycles code for the 2D subduction case. """ # general imports import json import configparser import numpy as np import pandas as pd from scipy.integrate import solve_ivp from numba import njit, objmode, float64, int64, boolean from scipy.interpolate import interp1d from warnings import warn from abc import ABC # seqeas imports from .kernels2d import Glinedisp, Klinedisp class Rheology(ABC): """ Abstract base class for rheologies. """ class NonlinearViscous(Rheology): r""" Implement a nonlinear viscous fault rheology, where the velocity :math:`v` is :math:`v = \tau^n / \alpha_n` given the shear stress :math:`\tau`, a strength constant :math:`\alpha_n`, and a constant exponent :math:`n`. """ def __init__(self, n, alpha_n, n_mid=None, alpha_n_mid=None, mid_transition=None, n_deep=None, alpha_n_deep=None, deep_transition=None, deep_transition_width=None, n_boundary=None, alpha_n_boundary=None): r""" Setup the rheology parameters for a given fault. Parameters ---------- alpha_n : float Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] n : float Power-law exponent :math:`n` [-] """ # input check assert not np.logical_xor(deep_transition is None, deep_transition_width is None) # set number of variables self.n_vars = 2 """ Number of variables to track by rheology [-] """ # initialization self._n = float(n) self._n_mid = float(n_mid) if n_mid is not None else self.n self._n_deep = float(n_deep) if n_deep is not None else self.n_mid self.n_boundary = float(n_boundary) if n_boundary is not None else self.n_deep """ Power-law exponent :math:`n` [-] """ self.alpha_n = float(alpha_n) self.alpha_n_mid = (float(alpha_n_mid) if alpha_n_mid is not None else self.alpha_n) self.alpha_n_deep = (float(alpha_n_deep) if alpha_n_deep is not None else self.alpha_n_mid) self.alpha_n_boundary = (float(alpha_n_boundary) if alpha_n_boundary is not None else self.alpha_n_deep) r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """ self.mid_transition = None if mid_transition is None else float(mid_transition) """ Depth [m] for the middle transition point """ self.deep_transition = None if deep_transition is None else float(deep_transition) """ (Upper) Depth [m] for the deep transition point """ self.deep_transition_width = (None if deep_transition_width is None else float(deep_transition_width)) """ (Downdip) Width [m] of the deep transition point """ @property def alpha_n(self): r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """ return self._alpha_n @alpha_n.setter def alpha_n(self, alpha_n): self._alpha_n = float(alpha_n) self._A = self.calc_A(self._alpha_n, self._n) @property def alpha_n_mid(self): r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """ return self._alpha_n_mid @alpha_n_mid.setter def alpha_n_mid(self, alpha_n_mid): self._alpha_n_mid = float(alpha_n_mid) self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid) @property def alpha_n_deep(self): r""" Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """ return self._alpha_n_deep @alpha_n_deep.setter def alpha_n_deep(self, alpha_n_deep): self._alpha_n_deep = float(alpha_n_deep) self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep) @property def n(self): """ Power-law exponent :math:`n` [-] """ return self._n @n.setter def n(self, n): self._n = float(n) self._A = self.calc_A(self._alpha_n, self._n) @property def n_mid(self): """ Power-law exponent 
:math:`n` [-] """ return self._n_mid @n_mid.setter def n_mid(self, n_mid): self._n_mid = float(n_mid) self._A_mid = self.calc_A(self._alpha_n_mid, self._n_mid) @property def n_deep(self): """ Power-law exponent :math:`n` [-] """ return self._n_deep @n_deep.setter def n_deep(self, n_deep): self._n_deep = float(n_deep) self._A_deep = self.calc_A(self._alpha_n_deep, self._n_deep) @property def A(self): r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """ return self._A @property def A_mid(self): r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """ return self._A_mid @property def A_deep(self): r""" Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] """ return self._A_deep @staticmethod def calc_A(alpha_n, n): """ Calculate A from alpha_n and n """ return alpha_n ** (1 / n) def get_param_vectors(self, patch_depths, v_eff): r""" Calculate the depth-dependent arrays of :math:`\alpha_n`, :math:`n`, and :math:`A`, assuming :math:`\alpha_n` and :math:`\alpha_{n,eff}` vary log-linearly with depth, and :math:`n` adapts between the transition points. """ assert np.all(np.diff(patch_depths) >= 0) # start knots list knots = [patch_depths[0]] vals_alpha_n = [self.alpha_n] vals_n = [self.n] # add optional mid transition if self.mid_transition is not None: knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))]) vals_alpha_n.append(self.alpha_n_mid) vals_n.append(self.n_mid) # add optional deep transition if self.deep_transition is not None: knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))]) vals_alpha_n.append(self.alpha_n_deep) vals_n.append(self.n_deep) knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition - self.deep_transition_width))]) vals_alpha_n.append(self.alpha_n_boundary) vals_n.append(self.n_boundary) # add final value knots.append(patch_depths[-1]) vals_alpha_n.append(self.alpha_n_boundary) vals_alpha_n = np.array(vals_alpha_n) vals_n.append(self.n_boundary) vals_n = np.array(vals_n) vals_alpha_eff = SubductionSimulation.get_alpha_eff(vals_alpha_n, vals_n, v_eff) # interpolate alpha_n and alpha_eff alpha_n_vec = 10**interp1d(knots, np.log10(vals_alpha_n))(patch_depths) alpha_eff_vec = 10**interp1d(knots, np.log10(vals_alpha_eff))(patch_depths) # get n and A n_vec = SubductionSimulation.get_n(alpha_n_vec, alpha_eff_vec, v_eff) A_vec = alpha_n_vec ** (1 / n_vec) return alpha_n_vec, n_vec, A_vec class RateStateSteadyLogarithmic(Rheology): r""" Implement a steady-state rate-and-state rheology using the ageing law (effectively becoming a rate-dependent rheology) with velocity in logarithmic space defined by :math:`f_{ss} = f_0 + (a - b) * \zeta = \tau / \sigma_E` where :math:`f_{ss}` is the steady-state friction, :math:`f_0` is a reference friction, :math:`a` and :math:`b` are the rate-and-state frictional parameters, :math:`\zeta = \log (v / v_0)` is the logarithmic velocity, :math:`\tau` is the shear stress, and :math:`\sigma_E` is the effective fault normal stress. """ def __init__(self, v_0, alpha_h, alpha_h_mid=None, mid_transition=None, alpha_h_deep=None, deep_transition=None, deep_transition_width=None, alpha_h_boundary=None): r""" Setup the rheology parameters for a given fault. Parameters ---------- v_0 : float Reference velocity [m/s] used for the transformation into logarithmic space. 
alpha_h : float Rate-and-state parameter :math:`(a - b) * \sigma_E`, where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties, and :math:`\sigma_E` [Pa] is effective fault normal stress. """ self.alpha_h = float(alpha_h) r""" Rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """ # input check assert not np.logical_xor(deep_transition is None, deep_transition_width is None) assert float(v_0) > 0, "RateStateSteadyLogarithmic needs to have positive v_0." # set number of variables self.n_vars = 2 """ Number of variables to track by rheology [-] """ # initialization self.v_0 = float(v_0) """ Reference velocity :math:`v_0` [m/s] """ self.alpha_h = float(alpha_h) r""" Rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """ self.alpha_h_mid = (float(alpha_h_mid) if alpha_h_mid is not None else self.alpha_h) r""" Middle rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """ self.alpha_h_deep = (float(alpha_h_deep) if alpha_h_deep is not None else self.alpha_h_mid) r""" Deep rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """ self.alpha_h_boundary = (float(alpha_h_boundary) if alpha_h_boundary is not None else self.alpha_h_deep) r""" Boundary-layer rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] """ self.mid_transition = None if mid_transition is None else float(mid_transition) """ Depth [m] for the middle transition point """ self.deep_transition = None if deep_transition is None else float(deep_transition) """ (Upper) Depth [m] for the deep transition point """ self.deep_transition_width = (None if deep_transition_width is None else float(deep_transition_width)) """ (Downdip) Width [m] of the deep transition point """ def get_param_vectors(self, patch_depths): r""" Calculate the depth-dependent array of :math:`\alpha_h`, assuming it varies log-linearly with depth. """ assert np.all(np.diff(patch_depths) >= 0) # start knots list knots = [patch_depths[0]] vals_alpha_h = [self.alpha_h] # add optional mid transition if self.mid_transition is not None: knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.mid_transition))]) vals_alpha_h.append(self.alpha_h_mid) # add optional deep transition if self.deep_transition is not None: knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition))]) vals_alpha_h.append(self.alpha_h_deep) knots.append(patch_depths[np.argmin(np.abs(patch_depths - self.deep_transition - self.deep_transition_width))]) vals_alpha_h.append(self.alpha_h_boundary) # add final value knots.append(patch_depths[-1]) vals_alpha_h.append(self.alpha_h_boundary) vals_alpha_h = np.array(vals_alpha_h) # interpolate alpha_n and alpha_eff alpha_h_vec = 10**interp1d(knots, np.log10(vals_alpha_h))(patch_depths) return alpha_h_vec @njit(float64[:](float64[:], float64[:], float64[:], float64[:]), cache=True) def dvdt_plvis(dtaudt, v, A, n): r""" Calculate the velocity derivative for a power-law viscous rheology. From :math:`v = \tau^n / \alpha_n` we get: :math:`\frac{dv}{dt} = \frac{n}{\alpha_n} \tau^{n-1} \frac{d \tau}{dt}` where :math:`\tau^{n-1} = \left( \alpha_n v \right)^{\frac{n-1}{n}}` simplifying to :math:`\frac{dv}{dt} = \frac{n}{A} v^{1-\frac{1}{n}} \frac{d \tau}{dt}` Parameters ---------- dtaudt : numpy.ndarray 1D array of the shear stress derivative v : numpy.ndarray 1D array of the current velocity A : numpy.ndarray Rescaled nonlinear viscous rheology strength constant n : numpy.ndarray Power-law exponent Returns ------- dvdt : numpy.ndarray 1D array of the velocity derivative. 
""" signs = np.sign(v) return (n / A) * (signs * v)**(1 - 1 / n) * dtaudt @njit(float64[:](float64[:], float64[:]), cache=True) def dzetadt_rdlog(dtaudt, alpha_h_vec): r""" Return the velocity derivative in logarithmic space given the current traction rate in linear space. Taking the derivative of the steady-state friction gives an explicit formulation for the slip acceleration :math:`\frac{d \zeta}{dt}`: :math:`\frac{df_{ss}}{dt} = (a-b) \frac{d \zeta}{dt}` Recognizing that :math:`\tau = f_{ss} \sigma_E` and assuming constant effective normal stress leads to :math:`\frac{d \tau}{dt} = \sigma_E \frac{df_{ss}}{dt}`, which can be rearranged to give the final expression :math:`\frac{d \zeta}{dt} = \frac{1}{(a-b) \sigma_E} \frac{d \tau}{dt}` Parameters ---------- dtaudt : numpy.ndarray Traction derivative :math:`\frac{d \tau}{dt}` [Pa/s] in linear space alpha_h_vec : float Rate-and-state parameter :math:`(a - b) * \sigma_E` Returns ------- dzetadt : numpy.ndarray Velocity derivative :math:`\frac{d \zeta}{dt}` [1/s] in logarithmic space. """ return dtaudt / alpha_h_vec @njit(float64[:](float64[:], float64[:], float64[:], float64[:], float64[:]), cache=True) def get_new_vel_plvis(v_minus, delta_tau, alpha_n, n, A): r""" Calculate the instantaneous velocity change due to an instantaneous stress change to the fault patches. It is derived from: :math:`\tau_{+} = \tau_{-} + \Delta \tau` and plugging in the relationship :math:`v = \tau^n / \alpha_n`, we get :math:`\sqrt[n]{\alpha_n v_{+}} = \sqrt[n]{\alpha_n v_{-}} + \Delta \tau` and finally :math:`v_{+} = \frac{\left( A \sqrt[n]{v_{-}} + \Delta \tau \right)^n}{\alpha_n}` Parameters ---------- v_minus : numpy.ndarray Initial velocity :math:`v_{-}` [m/s] delta_tau : numpy.ndarray Traction stress change :math:`\Delta \tau` [Pa] alpha_n : numpy.ndarray Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] n : numpy.ndarray Power-law exponent :math:`n` [-] A : numpy.ndarray Rescaled strength term :math:`A = \alpha_n^{1/n}` [Pa * (s/m)^(1/n)] Returns ------- v_plus : numpy.ndarray Velocity :math:`v_{+}` [m/s] after stress change """ signs = np.sign(v_minus) temp = A * (signs * v_minus)**(1 / n) + (signs * delta_tau) return np.abs(temp) ** (n - 1) * temp / alpha_n * signs @njit(float64[:](float64[:], float64[:], float64[:]), cache=True) def get_new_vel_rdlog(zeta_minus, delta_tau, alpha_h_vec): r""" Calculate the instantaneous velocity change (in logarithmic space) due to an instantaneous stress change to the fault patches. 
We can kickstart the derivatuion from the expression in ``RateStateSteadyLinear.get_new_vel``: :math:`\log (v_{+}/v_0) = \log (v_{-}/v_0) + \Delta\tau / \alpha_h` and realize that we only have to plug in our definition for :math:`\zeta` to give us the final result :math:`\zeta_{+} = \zeta_{-} + \Delta\tau / \alpha_h` Parameters ---------- zeta_minus : numpy.ndarray Initial velocity :math:`\zeta_{-}` [-] in logarithmic space delta_tau : numpy.ndarray, optional Traction stress change :math:`\Delta \tau` [Pa] alpha_h_vec : numpy.ndarray Rate-and-state parameter :math:`(a - b) * \sigma_E` Returns ------- zeta_plus : numpy.ndarray Velocity :math:`\zeta_{+}` [-] in logarithmic space after stress change See Also -------- alpha_h """ return zeta_minus + delta_tau / alpha_h_vec @njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1], float64[:], float64[:], float64), cache=True) def flat_ode_plvis(t, state, v_plate, K_int, K_ext, A_upper, n_upper, mu_over_2vs): r""" Flattened ODE derivative function for a subduction fault with powerlaw-viscous rheology in the upper plate interface, and an imposed constant plate velocity at the lower interface (which can be ignored). Parameters ---------- t : float Current time (needs to be in function call for solve_ivp). state : numpy.ndarray 1D array with the current state of the creeping fault patches, containing (in order) the upper cumulative slip and upper velocity. v_plate : float Plate velocity. K_int : numpy.ndarray 2D array with the stress kernel mapping creeping patches to themselves. K_ext : numpy.ndarray 2D array with the stress kernel mapping the effect of the locked patches onto the creeping patches. A_upper : numpy.ndarray Upper plate interface rescaled nonlinear viscous rheology strength constant n_upper : numpy.ndarray Upper plate interface power-law exponent mu_over_2vs : float Radiation damping factor Returns ------- dstatedt : numpy.ndarray 1D array with the state derivative. """ # get number of variables within state # (depends on rheology, so is hardcoded here) n_vars_upper = 2 n_creeping_upper = state.size // n_vars_upper assert K_int.shape == (n_creeping_upper, n_creeping_upper) assert K_ext.shape[0] == n_creeping_upper # extract total velocities v = state[n_creeping_upper:] # get shear strain rate signs = np.sign(v) temp = mu_over_2vs * (n_upper / A_upper) * (signs * v)**(1 - 1 / n_upper) dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1) ) / (1 + temp) # get ODE dstatedt = np.concatenate((v, dvdt_plvis(dtaudt, v, A_upper, n_upper))) # return return dstatedt @njit(float64[:](float64, float64[:], float64, float64[:, ::1], float64[:, ::1], float64, float64[:], float64), cache=True) def flat_ode_rdlog(t, state, v_plate, K_int, K_ext, v_0, alpha_h_vec, mu_over_2vs): r""" Flattened ODE derivative function for a subduction fault with powerlaw-viscous rheology in the upper plate interface, and an imposed constant plate velocity at the lower interface (which can be ignored). Parameters ---------- t : float Current time (needs to be in function call for solve_ivp). state : numpy.ndarray 1D array with the current state of the creeping fault patches, containing (in order) the upper cumulative slip and upper velocity. v_plate : float Plate velocity. K_int : numpy.ndarray 2D array with the stress kernel mapping creeping patches to themselves. K_ext : numpy.ndarray 2D array with the stress kernel mapping the effect of the locked patches onto the creeping patches. 
v_0 : float Reference velocity [m/s] alpha_h_vec : numpy.ndarray Rate-and-state parameter :math:`(a - b) * \sigma_E` mu_over_2vs : float Radiation damping factor Returns ------- dstatedt : numpy.ndarray 1D array with the state derivative. """ # get number of variables within state # (depends on rheology, so is hardcoded here) n_vars_upper = 2 n_creeping_upper = state.size // n_vars_upper assert K_int.shape == (n_creeping_upper, n_creeping_upper) assert K_ext.shape[0] == n_creeping_upper # extract total velocities zeta = state[n_creeping_upper:] v = v_0 * np.exp(zeta) # get shear strain rate temp = mu_over_2vs * v / alpha_h_vec dtaudt = (K_int @ (v - v_plate) - np.sum(K_ext * v_plate, axis=1) ) / (1 + temp) # get ODE dstatedt = np.concatenate((v, dzetadt_rdlog(dtaudt, alpha_h_vec))) # return return dstatedt @njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1], float64, float64, float64, float64), cache=True) def flat_ode_plvis_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower): """ Flattened ODE derivative function for a subduction fault with powerlaw-viscous rheology in both the upper and lower plate interface. Parameters ---------- t : float Current time (needs to be in function call for solve_ivp). state : numpy.ndarray 1D array with the current state of the creeping fault patches, containing (in order) the upper cumulative slip, upper velocity, lower cumulative slip, lower velocity. n_creeping_upper : int Number of creeping patches in the upper plate interface. The number of creeping patches in the lower plate interface can then be derived from the size of ``state``. v_plate_vec : float Initial velocity in all creeping patches. K_int : numpy.ndarray 2D array with the stress kernel mapping creeping patches to themselves. K_ext : numpy.ndarray 2D array with the stress kernel mapping the effect of the locked patches onto the creeping patches. A_upper : float Upper plate interface rescaled nonlinear viscous rheology strength constant n_upper : float Upper plate interface power-law exponent A_lower : float Lower plate interface rescaled nonlinear viscous rheology strength constant n_lower : float Lower plate interface power-law exponent Returns ------- dstatedt : numpy.ndarray 1D array with the state derivative. 
""" # get number of variables within state # (depends on rheology, so is hardcoded here) n_vars_upper, n_vars_lower = 2, 2 n_state_upper = n_vars_upper * n_creeping_upper n_state_lower = state.size - n_state_upper n_creeping_lower = n_state_lower // n_vars_lower n_creeping = n_creeping_lower + n_creeping_upper assert K_int.shape[0] == K_int.shape[1] == n_creeping assert K_ext.shape[0] == n_creeping # split up state state_upper = state[:n_state_upper] state_lower = state[n_state_upper:] # extract total velocities v_upper = state_upper[n_creeping_upper:] v_lower = state_lower[n_creeping_lower:] # get shear strain rate v = np.concatenate((v_upper, v_lower)) dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1)) dtaudt_upper = dtaudt[:n_creeping_upper] dtaudt_lower = dtaudt[n_creeping_upper:] # get individual rheologies' ODE dstatedt_upper = \ np.concatenate((v_upper, dvdt_plvis(dtaudt_upper, v_upper, np.ones_like(v_upper) * A_upper, np.ones_like(v_upper) * n_upper))) dstatedt_lower = \ np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower, np.ones_like(v_lower) * A_lower, np.ones_like(v_upper) * n_lower))) # concatenate and return return np.concatenate((dstatedt_upper, dstatedt_lower)) @njit(float64[:](float64, float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1], float64, float64, float64, float64), cache=True) def flat_ode_rdlog_plvis(t, state, n_creeping_upper, v_plate_vec, K_int, K_ext, v_0, alpha_h_upper, A_lower, n_lower): r""" Flattened ODE derivative function for a subduction fault with rate-dependent (log-space) rheology in the upper and nonlinear viscous rheology in the lower plate interface. Parameters ---------- t : float Current time (needs to be in function call for solve_ivp). state : numpy.ndarray 1D array with the current state of the creeping fault patches, containing (in order) the upper cumulative slip, upper velocity, lower cumulative slip, lower velocity. n_creeping_upper : int Number of creeping patches in the upper plate interface. The number of creeping patches in the lower plate interface can then be derived from the size of ``state``. v_plate_vec : float Initial velocity in all creeping patches. K_int : numpy.ndarray 2D array with the stress kernel mapping creeping patches to themselves. K_ext : numpy.ndarray 2D array with the stress kernel mapping the effect of the locked patches onto the creeping patches. v_0 : float Reference velocity [m/s] alpha_h_upper : float Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] A_lower : float Lower plate interface rescaled nonlinear viscous rheology strength constant n_lower : float Lower plate interface power-law exponent Returns ------- dstatedt : numpy.ndarray 1D array with the state derivative. 
""" # get number of variables within state # (depends on rheology, so is hardcoded here) n_vars_upper, n_vars_lower = 2, 2 n_state_upper = n_vars_upper * n_creeping_upper n_state_lower = state.size - n_state_upper n_creeping_lower = n_state_lower // n_vars_lower n_creeping = n_creeping_lower + n_creeping_upper assert K_int.shape[0] == K_int.shape[1] == n_creeping assert K_ext.shape[0] == n_creeping # split up state state_upper = state[:n_state_upper] state_lower = state[n_state_upper:] # extract total velocities v_upper = v_0 * np.exp(state_upper[n_creeping_upper:]) v_lower = state_lower[n_creeping_lower:] # get shear strain rate v = np.concatenate((v_upper, v_lower)) dtaudt = (K_int @ (v - v_plate_vec) - np.sum(K_ext * v_plate_vec[0], axis=1)) dtaudt_upper = dtaudt[:n_creeping_upper] dtaudt_lower = dtaudt[n_creeping_upper:] # get individual rheologies' ODE dstatedt_upper = \ np.concatenate((v_upper, dzetadt_rdlog(dtaudt_upper, np.ones_like(v_lower) * alpha_h_upper))) dstatedt_lower = \ np.concatenate((v_lower, dvdt_plvis(dtaudt_lower, v_lower, np.ones_like(v_lower) * A_lower, np.ones_like(v_upper) * n_lower))) # concatenate and return return np.concatenate((dstatedt_upper, dstatedt_lower)) # simple rk4 @njit(float64[:, :](float64, float64, float64[:], float64[:], int64, float64[:], float64[:, ::1], float64[:, ::1], float64, float64, float64, float64), cache=True) def myrk4(ti, tf, state0, t_eval, n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower): h = t_eval[1] - t_eval[0] num_state = state0.size num_eval = t_eval.size sol = np.zeros((num_eval, num_state)) sol[0, :] = state0 for i in range(1, num_eval): cur = sol[i-1, :] k1 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower) cur = sol[i-1, :] + (h / 2) * k1 k2 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower) cur = sol[i-1, :] + (h / 2) * k2 k3 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower) cur = sol[i-1, :] + h * k3 k4 = flat_ode_plvis_plvis(ti, cur, n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower) sol[i, :] = sol[i-1, :] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4) return sol @njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1], float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1], float64[:], float64[:], float64[:], float64), cache=True) def flat_run_plvis(t_eval, i_break, i_eq, n_creeping_upper, n_creeping_lower, K_int, K_ext, v_plate_vec, v_init, slip_taper, delta_tau_bounded, alpha_n_vec, n_vec, A_vec, mu_over_2vs): r""" Run the simulation. 
Parameters ---------- t_eval : numpy.ndarray Evaluation times [s] i_break : numpy.ndarray Integer indices of cycle breaks [-] i_eq : numpy.ndarray Integer indices of earthquakes within sequence [-] n_creeping_upper : int Number [-] of creeping patches in the upper fault interface n_creeping_lower : int Number [-] of creeping patches in the lower fault interface K_int : numpy.ndarray Internal stress kernel [Pa/m] K_ext : numpy.ndarray External stress kernel [Pa/m] v_plate_vec : numpy.ndarray Plate velocity for all creeping patches [m/s] v_init : numpy.ndarray Initial velocity of the fault patches, in the dimensions of the rheology slip_taper : numpy.ndarray Compensating coseismic tapered slip on creeping patches [m] delta_tau_bounded : numpy.ndarray Bounded coseismic stress change [Pa] alpha_n_vec : numpy.ndarray Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m] at each patch n_vec : float Upper plate interface power-law exponent [-] at each patch A_vec : float Rescaled upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m] at each patch mu_over_2vs : float Radiation damping factor :math:`\mu / 2 v_s`, where :math:`\mu` is the shear modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s] Returns ------- full_state : numpy.ndarray Full state variable at the end of the integration. """ # initialize parameters n_vars_upper, n_vars_lower = 2, 2 n_state_upper = n_vars_upper * n_creeping_upper n_state_lower = n_vars_lower * n_creeping_lower n_eval = t_eval.size n_slips = delta_tau_bounded.shape[1] # initialize arrays s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper) s_minus_lower = np.zeros(n_creeping_lower) v_minus_upper = v_init[:n_creeping_upper] v_minus_lower = v_plate_vec[n_creeping_upper:] full_state = np.empty((n_state_upper + n_state_lower, n_eval)) full_state[:] = np.NaN state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower)) # make flat ODE function arguments args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(), K_ext[:n_creeping_upper, :], A_vec, n_vec, mu_over_2vs) # integrate spun_up = 0 i_slip = 0 steps = np.sort(np.concatenate((i_eq, i_break))) i = 0 atol = np.ones(n_state_upper) * 1e-6 atol[n_creeping_upper:] = 1e-15 while i < steps.size - 1: # print(f"{i+1}/{steps.size - 1}") # get indices ji, jf = steps[i], steps[i+1] ti, tf = t_eval[ji], t_eval[jf] # call integrator with objmode(sol="float64[:, :]", success="boolean"): sol = solve_ivp(flat_ode_plvis, t_span=[ti, tf], y0=state_plus[:n_state_upper], t_eval=t_eval[ji:jf + 1], method="LSODA", rtol=1e-6, atol=atol, args=args) success = sol.success if success: sol = sol.y else: sol = np.empty((1, 1)) if not success: raise RuntimeError("Integrator failed.") # save state to output array full_state[:n_state_upper, ji:jf + 1] = sol # fill in the imposed lower state full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \ np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \ * np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1)) full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \ np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) # can already stop here if this is the last interval if i == steps.size - 2: break # at the end of a full cycle, check the early stopping criteria if (not spun_up) and (i > n_slips) and (jf in i_break): old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]] new_full_state = full_state[:, 
steps[i-n_slips]:steps[i+1]] old_state_upper = old_full_state[:n_state_upper, :] new_state_upper = new_full_state[:n_state_upper, :] old_v_upper = old_state_upper[-n_creeping_upper:, -1] new_v_upper = new_state_upper[-n_creeping_upper:, -1] lhs_upper = np.abs(old_v_upper - new_v_upper) rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper) stop_now = np.all(lhs_upper <= rhs_upper) if stop_now: spun_up = jf # advance i to the last cycle (don't forget the general advance later) i = steps.size - n_slips - 3 elif spun_up and (jf in i_break): break # apply step change only if there is one if (jf in i_eq): state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1] s_minus_upper = state_upper[:-n_creeping_upper] v_minus_upper = state_upper[-n_creeping_upper:] s_minus_lower = state_lower[:-n_creeping_lower] v_minus_lower = state_lower[-n_creeping_lower:] s_plus_upper = s_minus_upper.ravel().copy() s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip] s_plus_lower = s_minus_lower.ravel() v_plus_upper = get_new_vel_plvis(v_minus_upper, delta_tau_bounded[:n_creeping_upper, i_slip], alpha_n_vec, n_vec, A_vec) v_plus_lower = v_minus_lower.ravel() state_plus = np.concatenate((s_plus_upper, v_plus_upper, s_plus_lower, v_plus_lower)) i_slip = (i_slip + 1) % n_slips else: state_plus = sol[:, -1] # advance i += 1 # warn if we never spun up if not spun_up: print(f"Simulation did not spin up after {len(i_break) - 1} cycles!") # done return full_state @njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1], float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1], float64, float64[:], float64), cache=True) def flat_run_rdlog(t_eval, i_break, i_eq, n_creeping_upper, n_creeping_lower, K_int, K_ext, v_plate_vec, v_init, slip_taper, delta_tau_bounded, v_0, alpha_h_vec, mu_over_2vs): r""" Run the simulation. Parameters ---------- t_eval : numpy.ndarray Evaluation times [s] i_break : numpy.ndarray Integer indices of cycle breaks [-] i_eq : numpy.ndarray Integer indices of earthquakes within sequence [-] n_creeping_upper : int Number [-] of creeping patches in the upper fault interface n_creeping_lower : int Number [-] of creeping patches in the lower fault interface K_int : numpy.ndarray Internal stress kernel [Pa/m] K_ext : numpy.ndarray External stress kernel [Pa/m] v_plate_vec : numpy.ndarray Plate velocity for all creeping patches [m/s] v_init : numpy.ndarray Initial velocity of the fault patches, in the dimensions of the rheology slip_taper : numpy.ndarray Compensating coseismic tapered slip on creeping patches [m] delta_tau_bounded : numpy.ndarray Bounded coseismic stress change [Pa] v_0 : float Reference velocity [m/s] alpha_h_vec : numpy.ndarray Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] mu_over_2vs : float Radiation damping factor :math:`\mu / 2 v_s`, where :math:`\mu` is the shear modulus [Pa] and :math:`v_s` is the shear wave velocity [m/s] Returns ------- full_state : numpy.ndarray Full state variable at the end of the integration. 
""" # initialize parameters n_vars_upper, n_vars_lower = 2, 2 n_state_upper = n_vars_upper * n_creeping_upper n_state_lower = n_vars_lower * n_creeping_lower n_eval = t_eval.size n_slips = delta_tau_bounded.shape[1] # initialize arrays s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper) s_minus_lower = np.zeros(n_creeping_lower) assert np.all(v_init[:n_creeping_upper] > 0) zeta_minus_upper = np.log(v_init[:n_creeping_upper] / v_0) v_minus_lower = v_plate_vec[n_creeping_upper:] full_state = np.empty((n_state_upper + n_state_lower, n_eval)) full_state[:] = np.NaN state_plus = np.concatenate((s_minus_upper, zeta_minus_upper, s_minus_lower, v_minus_lower)) # make flat ODE function arguments args = (v_plate_vec[0], K_int[:n_creeping_upper, :n_creeping_upper].copy(), K_ext[:n_creeping_upper, :], v_0, alpha_h_vec, mu_over_2vs) # integrate spun_up = 0 i_slip = 0 steps = np.sort(np.concatenate((i_eq, i_break))) i = 0 while i < steps.size - 1: # print(f"{i+1}/{steps.size - 1}") # get indices ji, jf = steps[i], steps[i+1] ti, tf = t_eval[ji], t_eval[jf] # call integrator with objmode(sol="float64[:, :]", success="boolean"): sol = solve_ivp(flat_ode_rdlog, t_span=[ti, tf], y0=state_plus[:n_state_upper], t_eval=t_eval[ji:jf + 1], method="LSODA", args=args) success = sol.success if success: sol = sol.y else: sol = np.empty((1, 1)) if not success: raise RuntimeError("Integrator failed.") # save state to output array full_state[:n_state_upper, ji:jf + 1] = sol # fill in the imposed lower state full_state[n_state_upper:n_state_upper + n_creeping_lower, ji:jf + 1] = \ np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) \ * np.ascontiguousarray(t_eval[ji:jf + 1]).reshape((1, -1)) full_state[n_state_upper + n_creeping_lower:, ji:jf + 1] = \ np.ascontiguousarray(v_plate_vec[n_creeping_upper:]).reshape((-1, 1)) # can already stop here if this is the last interval if i == steps.size - 2: break # at the end of a full cycle, check the early stopping criteria if (not spun_up) and (i > n_slips) and (jf in i_break): old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]] new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]] old_state_upper = old_full_state[:n_state_upper, :] new_state_upper = new_full_state[:n_state_upper, :] old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1]) new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1]) lhs_upper = np.abs(old_v_upper - new_v_upper) rhs_upper = (1e-3) * np.abs(v_plate_vec[0]) + (1e-3) * np.abs(new_v_upper) stop_now = np.all(lhs_upper <= rhs_upper) if stop_now: spun_up = jf # advance i to the last cycle (don't forget the general advance later) i = steps.size - n_slips - 3 elif spun_up and (jf in i_break): break # apply step change only if there is one if (jf in i_eq): state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1] s_minus_upper = state_upper[:-n_creeping_upper] zeta_minus_upper = state_upper[-n_creeping_upper:] s_minus_lower = state_lower[:-n_creeping_lower] v_minus_lower = state_lower[-n_creeping_lower:] s_plus_upper = s_minus_upper.ravel().copy() s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip] s_plus_lower = s_minus_lower.ravel() zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper, delta_tau_bounded[:n_creeping_upper, i_slip], alpha_h_vec) v_plus_lower = v_minus_lower.ravel() state_plus = np.concatenate((s_plus_upper, zeta_plus_upper, s_plus_lower, v_plus_lower)) i_slip = (i_slip + 1) % n_slips else: state_plus = sol[:, -1] # advance i += 1 # warn if we 
never spun up if not spun_up: print(f"Simulation did not spin up after {len(i_break) - 1} cycles!") full_state[n_creeping_upper:n_state_upper, :] = \ v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :]) # done return full_state @njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1], float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1], float64, float64, float64, float64, boolean), cache=True) def flat_run_plvis_plvis(t_eval, i_break, i_eq, n_creeping_upper, n_creeping_lower, K_int, K_ext, v_plate_vec, v_init, slip_taper, delta_tau_bounded, alpha_n_upper, n_upper, alpha_n_lower, n_lower, simple_rk4): """ Run the simulation. Parameters ---------- t_eval : numpy.ndarray Evaluation times [s] i_break : numpy.ndarray Integer indices of cycle breaks [-] i_eq : numpy.ndarray Integer indices of earthquakes within sequence [-] n_creeping_upper : int Number [-] of creeping patches in the upper fault interface n_creeping_lower : int Number [-] of creeping patches in the lower fault interface K_int : numpy.ndarray Internal stress kernel [Pa/m] K_ext : numpy.ndarray External stress kernel [Pa/m] v_plate_vec : numpy.ndarray Plate velocity for all creeping patches [m/s] v_init : numpy.ndarray Initial velocity of the fault patches, in the dimensions of the rheology slip_taper : numpy.ndarray Compensating coseismic tapered slip on creeping patches [m] delta_tau_bounded : numpy.ndarray Bounded coseismic stress change [Pa] alpha_n_upper : float Upper plate interface nonlinear viscous rheology strength constant [Pa^n * s/m] n_upper : float Upper plate interface power-law exponent [-] alpha_n_lower : float Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m] n_lower : float Lower plate interface power-law exponent [-] simple_rk4 : bool Decide whether to use the simple RK4 integrator or not Returns ------- full_state : numpy.ndarray Full state variable at the end of the integration. 
""" # initialize parameters n_vars_upper, n_vars_lower = 2, 2 n_state_upper = n_vars_upper * n_creeping_upper n_state_lower = n_vars_lower * n_creeping_lower A_upper = alpha_n_upper ** (1 / n_upper) A_lower = alpha_n_lower ** (1 / n_lower) n_eval = t_eval.size n_slips = delta_tau_bounded.shape[1] # initialize arrays s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper) s_minus_lower = np.zeros(n_creeping_lower) v_minus_upper = v_init[:n_creeping_upper] # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic): # v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper) v_minus_lower = v_init[n_creeping_upper:] full_state = np.empty((n_state_upper + n_state_lower, n_eval)) full_state[:] = np.NaN state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower)) # make flat ODE function arguments args = (n_creeping_upper, v_plate_vec, K_int, K_ext, A_upper, n_upper, A_lower, n_lower) # integrate spun_up = 0 i_slip = 0 steps = np.sort(np.concatenate((i_eq, i_break))) i = 0 while i < steps.size - 1: # get indices ji, jf = steps[i], steps[i+1] ti, tf = t_eval[ji], t_eval[jf] # call integrator if simple_rk4: sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T else: with objmode(sol="float64[:, :]", success="boolean"): sol = solve_ivp(flat_ode_plvis_plvis, t_span=[ti, tf], y0=state_plus, t_eval=t_eval[ji:jf + 1], method="RK45", rtol=1e-9, atol=1e-12, args=args) success = sol.success sol = sol.y if not success: raise RuntimeError("Integrator failed.") # save state to output array full_state[:, ji:jf + 1] = sol # can already stop here if this is the last interval if i == steps.size - 2: break # at the end of a full cycle, check the early stopping criteria if (not spun_up) and (i > n_slips) and (jf in i_break): old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]] new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]] old_state_upper = old_full_state[:n_state_upper, :] old_state_lower = old_full_state[n_state_upper:, :] new_state_upper = new_full_state[:n_state_upper, :] new_state_lower = new_full_state[n_state_upper:, :] old_v_upper = old_state_upper[-n_creeping_upper:, -1] old_v_lower = old_state_lower[-n_creeping_lower:, -1] new_v_upper = new_state_upper[-n_creeping_upper:, -1] new_v_lower = new_state_lower[-n_creeping_lower:, -1] # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic): # old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper) # new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper) lhs_upper = np.abs(old_v_upper - new_v_upper) lhs_lower = np.abs(old_v_lower - new_v_lower) rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper) rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower) stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower) if stop_now: spun_up = jf # advance i to the last cycle (don't forget the general advance later) i = steps.size - n_slips - 3 elif spun_up and (jf in i_break): break # apply step change only if there is one if (jf in i_eq): state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1] s_minus_upper = state_upper[:-n_creeping_upper] v_minus_upper = state_upper[-n_creeping_upper:] s_minus_lower = state_lower[:-n_creeping_lower] v_minus_lower = state_lower[-n_creeping_lower:] s_plus_upper = s_minus_upper.ravel().copy() s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip] s_plus_lower = s_minus_lower.ravel() v_plus_upper = get_new_vel_plvis(v_minus_upper, 
delta_tau_bounded[:n_creeping_upper, i_slip], np.ones(n_creeping_upper) * alpha_n_upper, np.ones(n_creeping_upper) * n_upper, np.ones(n_creeping_upper) * A_upper) v_plus_lower = get_new_vel_plvis(v_minus_lower, delta_tau_bounded[n_creeping_upper:, i_slip], np.ones(n_creeping_upper) * alpha_n_lower, np.ones(n_creeping_upper) * n_lower, np.ones(n_creeping_upper) * A_lower) state_plus = np.concatenate((s_plus_upper, v_plus_upper, s_plus_lower, v_plus_lower)) i_slip = (i_slip + 1) % n_slips else: state_plus = sol[:, -1] # advance i += 1 # warn if we never spun up if not spun_up: print(f"Simulation did not spin up after {len(i_break) - 1} cycles!") # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic): # vel_upper = self.fault.upper_rheo.zeta2v(vel_upper) # done return full_state @njit(float64[:, :](float64[:], int64[:], int64[:], int64, int64, float64[:, ::1], float64[:, ::1], float64[:], float64[:], float64[:, ::1], float64[:, ::1], float64, float64, float64, float64, boolean), cache=True) def flat_run_rdlog_plvis(t_eval, i_break, i_eq, n_creeping_upper, n_creeping_lower, K_int, K_ext, v_plate_vec, v_init, slip_taper, delta_tau_bounded, v_0, alpha_h_upper, alpha_n_lower, n_lower, simple_rk4): r""" Run the simulation. Parameters ---------- t_eval : numpy.ndarray Evaluation times [s] i_break : numpy.ndarray Integer indices of cycle breaks [-] i_eq : numpy.ndarray Integer indices of earthquakes within sequence [-] n_creeping_upper : int Number [-] of creeping patches in the upper fault interface n_creeping_lower : int Number [-] of creeping patches in the lower fault interface K_int : numpy.ndarray Internal stress kernel [Pa/m] K_ext : numpy.ndarray External stress kernel [Pa/m] v_plate_vec : numpy.ndarray Plate velocity for all creeping patches [m/s] v_init : numpy.ndarray Initial velocity of the fault patches, in the dimensions of the rheology slip_taper : numpy.ndarray Compensating coseismic tapered slip on creeping patches [m] delta_tau_bounded : numpy.ndarray Bounded coseismic stress change [Pa] v_0 : float Reference velocity [m/s] alpha_h_upper : float Upper interface rate-and-state parameter :math:`(a - b) * \sigma_E` [Pa] alpha_n_lower : float Lower plate interface nonlinear viscous rheology strength constant [Pa^n * s/m] n_lower : float Lower plate interface power-law exponent [-] simple_rk4 : bool Decide whether to use the simple RK4 integrator or not Returns ------- full_state : numpy.ndarray Full state variable at the end of the integration. 
""" # initialize parameters n_vars_upper, n_vars_lower = 2, 2 n_state_upper = n_vars_upper * n_creeping_upper n_state_lower = n_vars_lower * n_creeping_lower A_lower = alpha_n_lower ** (1 / n_lower) n_eval = t_eval.size n_slips = delta_tau_bounded.shape[1] # initialize arrays s_minus_upper = np.zeros((n_vars_upper - 1) * n_creeping_upper) s_minus_lower = np.zeros(n_creeping_lower) assert np.all(v_init[:n_creeping_upper] > 0) v_minus_upper = np.log(v_init[:n_creeping_upper] / v_0) # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic): # v_minus_upper = self.fault.upper_rheo.v2zeta(v_minus_upper) v_minus_lower = v_init[n_creeping_upper:] full_state = np.empty((n_state_upper + n_state_lower, n_eval)) full_state[:] = np.NaN state_plus = np.concatenate((s_minus_upper, v_minus_upper, s_minus_lower, v_minus_lower)) # make flat ODE function arguments args = (n_creeping_upper, v_plate_vec, K_int, K_ext, v_0, alpha_h_upper, A_lower, n_lower) # integrate spun_up = 0 i_slip = 0 steps = np.sort(np.concatenate((i_eq, i_break))) i = 0 while i < steps.size - 1: # get indices ji, jf = steps[i], steps[i+1] ti, tf = t_eval[ji], t_eval[jf] # call integrator if simple_rk4: sol = myrk4(ti, tf, state_plus, t_eval[ji:jf + 1], *args).T else: with objmode(sol="float64[:, :]", success="boolean"): sol = solve_ivp(flat_ode_rdlog_plvis, t_span=[ti, tf], y0=state_plus, t_eval=t_eval[ji:jf + 1], method="RK45", rtol=1e-9, atol=1e-12, args=args) success = sol.success sol = sol.y if not success: raise RuntimeError("Integrator failed.") # save state to output array full_state[:, ji:jf + 1] = sol # can already stop here if this is the last interval if i == steps.size - 2: break # at the end of a full cycle, check the early stopping criteria if (not spun_up) and (i > n_slips) and (jf in i_break): old_full_state = full_state[:, steps[i-2*n_slips-1]:steps[i-n_slips]] new_full_state = full_state[:, steps[i-n_slips]:steps[i+1]] old_state_upper = old_full_state[:n_state_upper, :] old_state_lower = old_full_state[n_state_upper:, :] new_state_upper = new_full_state[:n_state_upper, :] new_state_lower = new_full_state[n_state_upper:, :] old_v_upper = v_0 * np.exp(old_state_upper[-n_creeping_upper:, -1]) old_v_lower = old_state_lower[-n_creeping_lower:, -1] new_v_upper = v_0 * np.exp(new_state_upper[-n_creeping_upper:, -1]) new_v_lower = new_state_lower[-n_creeping_lower:, -1] # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic): # old_v_upper = self.fault.upper_rheo.zeta2v(old_v_upper) # new_v_upper = self.fault.upper_rheo.zeta2v(new_v_upper) lhs_upper = np.abs(old_v_upper - new_v_upper) lhs_lower = np.abs(old_v_lower - new_v_lower) rhs_upper = (1e-4) * np.abs(v_plate_vec[0]) + (1e-4) * np.abs(new_v_upper) rhs_lower = (1e-4) * np.abs(v_plate_vec[-1]) + (1e-4) * np.abs(new_v_lower) stop_now = np.all(lhs_upper <= rhs_upper) & np.all(lhs_lower <= rhs_lower) if stop_now: spun_up = jf # advance i to the last cycle (don't forget the general advance later) i = steps.size - n_slips - 3 elif spun_up and (jf in i_break): break # apply step change only if there is one if (jf in i_eq): state_upper, state_lower = sol[:n_state_upper, -1], sol[n_state_upper:, -1] s_minus_upper = state_upper[:-n_creeping_upper] zeta_minus_upper = state_upper[-n_creeping_upper:] s_minus_lower = state_lower[:-n_creeping_lower] v_minus_lower = state_lower[-n_creeping_lower:] s_plus_upper = s_minus_upper.ravel().copy() s_plus_upper[:n_creeping_upper] += slip_taper[:, i_slip] s_plus_lower = s_minus_lower.ravel() 
zeta_plus_upper = get_new_vel_rdlog(zeta_minus_upper, delta_tau_bounded[:n_creeping_upper, i_slip], np.ones(n_creeping_upper) * alpha_h_upper) v_plus_lower = get_new_vel_plvis(v_minus_lower, delta_tau_bounded[n_creeping_upper:, i_slip], np.ones(n_creeping_upper) * alpha_n_lower, np.ones(n_creeping_upper) * n_lower, np.ones(n_creeping_upper) * A_lower) state_plus = np.concatenate((s_plus_upper, zeta_plus_upper, s_plus_lower, v_plus_lower)) i_slip = (i_slip + 1) % n_slips else: state_plus = sol[:, -1] # advance i += 1 # warn if we never spun up if not spun_up: print(f"Simulation did not spin up after {len(i_break) - 1} cycles!") full_state[n_creeping_upper:n_state_upper, :] = \ v_0 * np.exp(full_state[n_creeping_upper:n_state_upper, :]) # if isinstance(self.fault.upper_rheo, rheologies.RateStateSteadyLogarithmic): # vel_upper = self.fault.upper_rheo.zeta2v(vel_upper) # done return full_state @njit(float64[:, :](float64[:, ::1], int64, int64, float64[:, ::1], float64[:, ::1]), cache=True) # optional(float64[:, ::1]), optional(float64[:, ::1]))) def get_surface_displacements_plvis_plvis(full_state, n_creeping_upper, n_creeping_lower, G_surf, deep_creep_slip): # , locked_slip): """ Calculate the surface displacements given the output of ``run``. Parameters ---------- full_state : numpy.ndarray Full state variable at the end of the integration. n_creeping_upper : int Number [-] of creeping patches in the upper fault interface n_creeping_lower : int Number [-] of creeping patches in the lower fault interface G_surf : numpy.ndarray Surface displacements Green's matrix [-] (dimensions must whether `locked_slip` and/or `deep_creep_slip` are passed to function) deep_creep_slip : numpy.ndarray Timeseries of slip [m] on the deep creep patches locked_slip : numpy.ndarray, optional Timeseries of slip [m] on the locked patches Returns ------- surf_disp : numpy.ndarray Surface displacement timeseries. """ # extract timeseries from solution slip_upper = full_state[:n_creeping_upper, :] slip_lower = full_state[2 * n_creeping_upper:2 * n_creeping_upper + n_creeping_lower, :] # add the locked and deep patches to the combined upper & lower slip history matrix slips_all = np.concatenate((slip_upper, slip_lower), axis=0) # if locked_slip is not None:s # slips_all = np.concatenate((locked_slip[:, :slip_upper.shape[1]], slips_all), # axis=0) # if deep_creep_slip is not None: slips_all = np.concatenate((slips_all, deep_creep_slip), axis=0) # calculate all surface displacements for last full cycle surf_disps = G_surf @ slips_all return surf_disps class Fault2D(): """ Base class for the subduction fault mesh. """ def __init__(self, theta, D_lock, H, nu, E, v_s, halflen, upper_rheo, n_upper, lower_rheo, n_lower_left, n_lower_right, halflen_factor_lower, D_max=None, x1_pretrench=None): """ Define the fault mesh of the subduction zone fault system, based on the Elastic Subducting Plate Model (ESPM) of [kanda2010]_. Parameters ---------- theta : float Dip angle [rad] of the plate interface (positive). D_lock : float Locking depth [m] of the upper plate interface (positive). H : float Subducting plate thickness [m]. nu : float Poisson's ratio [-] of the fault zone. E : float Young's modulus [Pa] of the fault zone. v_s : float Shear wave velocity [m/s] in the fault zone. halflen : float Fault patch half-length [m], used for all locked patches. If ``D_max`` and ``x1_pretrench`` are not set, this length is also used for all creeping patches, otherwise, this is their minimum half-length. 
upper_rheo : Rheology Upper plate interface's rheology. n_upper : int Number [-] of patches on upper plate interface. lower_rheo : Rheology Lower plate interface's rheology. Pass ``None`` if it should not be simulated, but enforced to have the plate velocity. n_lower_left : int Number [-] of patches on lower plate interface (left of the bend). n_lower_right : int Number [-] of patches on lower plate interface (right of the bend). halflen_factor_lower : float Factor used to get a different minimum half-length of the patches on the lower plate interface. D_max : float, optional Maximum depth [m] of the upper plate interface (positive). If set, this makes the mesh use linearly-increasing patch sizes away from the locked zone. (``x1_pretrench`` must be set as well.) x1_pretrench : float, optional Horizontal distance [m] of the lower plate interface before the trench (positive). If set, this makes the mesh use linearly-increasing patch sizes away from the locked zone. (``D_max`` must be set as well.) References ---------- .. [kanda2010] Kanda, R. V. S., & Simons, M. (2010). *An elastic plate model for interseismic deformation in subduction zones.* Journal of Geophysical Research: Solid Earth, 115(B3). doi:`10.1029/2009JB006611 <https://doi.org/10.1029/2009JB006611>`_. """ # initialize self.theta = float(theta) """ Subducting plate dip angle [rad] """ assert 0 < self.theta < np.pi / 2 self.D_lock = float(D_lock) """ Theoretical locking depth [m] of the upper plate interface """ assert self.D_lock > 0 self.H = float(H) """ Subducting plate thickness [m] """ assert self.H >= 0 self.nu = float(nu) """ Poisson's ratio [-] of the fault zone """ self.E = float(E) """ Young's modulus [Pa] of the fault zone """ self.halflen = float(halflen) """ Fault patch half-length [m] on upper interface """ assert self.halflen > 0 self.upper_rheo = upper_rheo """ Upper plate interface's rheology """ assert isinstance(self.upper_rheo, Rheology) self.n_upper = int(n_upper) """ Number [-] of patches on upper plate interface """ assert self.n_upper >= 1 self.lower_rheo = lower_rheo """ Lower plate interface's rheology """ assert isinstance(self.lower_rheo, Rheology) or \ (self.lower_rheo is None) self.n_lower_left = int(n_lower_left) """ Number [-] of patches on lower plate interface (left of bend) """ assert self.n_lower_left >= 1 self.n_lower_right = int(n_lower_right) """ Number [-] of patches on lower plate interface (right of bend) """ assert self.n_lower_right >= 1 self.halflen_factor_lower = float(halflen_factor_lower) """ Prefactor [-] to change the lower interface half-length """ assert self.halflen_factor_lower >= 1 self.lower_halflen = self.halflen * self.halflen_factor_lower """ Fault patch half-length [m] on lower interface """ if self.lower_rheo is not None: assert self.H >= 2 * self.lower_halflen, "Plate too thin for given patch sizes." 
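# note: the radiation damping prefactor assigned below is mu / (2 * v_s), where the shear
# modulus mu = E / (2 * (1 + nu)) follows from the Young's modulus and Poisson's ratio stored
# above; with units Pa * s/m it converts a slip-rate change [m/s] into a damping shear stress [Pa]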
self.v_s = float(v_s) """ Shear wave velocity [m/s] in the fault zone """ self.mu_over_2vs = self.E / (2 * (1 + self.nu) * 2 * self.v_s) """ Radiation damping term [Pa * s/m] """ # switch between constant or linearly-varying patch sizes if (D_max is not None) and (x1_pretrench is not None): D_max = float(D_max) x1_pretrench = float(x1_pretrench) assert D_max > 0 assert x1_pretrench > 0 variable_mesh = True else: D_max = None x1_pretrench = None variable_mesh = False self.D_max = D_max """ Maximum depth [m] of the upper plate interface (optional) """ self.x1_pretrench = x1_pretrench """ Horizontal distance [m] of the lower plate interface before the trench (optional) """ self.variable_mesh = variable_mesh """ Flag whether the creeping patches are linearly-varying in size, or not """ # create mesh, centered about the x2 axis if self.variable_mesh: # project the locking depth onto dip angle L_lock = self.D_lock / np.sin(self.theta) # get number of locked and creeping patches on upper interface n_lock = int(L_lock // (2 * self.halflen)) n_creep_up = self.n_upper - n_lock assert n_creep_up > 0, "Current geometry yields no upper creeping patches." # project maximum interface depth onto dip angle L_max = self.D_max / np.sin(self.theta) # get length of creeping segment that needs to be linearly varying delta_L = L_max - n_lock * 2 * self.halflen # get linear half-length increase necessary given the number of patches # and length of creeping segment, on all three interface regions delta_h_upper = ((delta_L - 2 * self.halflen * n_creep_up) / (n_creep_up**2 - n_creep_up)) delta_h_lower_right = \ ((L_max - 2 * self.lower_halflen * self.n_lower_right) / (self.n_lower_right**2 - self.n_lower_right)) delta_h_lower_left = \ ((self.x1_pretrench - 2 * self.lower_halflen * self.n_lower_left) / (self.n_lower_left**2 - self.n_lower_left)) # check that we're not running into numerical problems from starkly # increasing patch sizes if any([d > 0.2 for d in [delta_h_upper / self.halflen, delta_h_lower_right / self.lower_halflen, delta_h_lower_left / self.lower_halflen]]): raise ValueError("Half-length increase greater than 20%.") # build vector of half-lengths halflen_vec = np.concatenate([ np.ones(n_lock) * self.halflen, self.halflen + np.arange(n_creep_up) * delta_h_upper, (self.lower_halflen + np.arange(self.n_lower_left) * delta_h_lower_left)[::-1], self.lower_halflen + np.arange(self.n_lower_right) * delta_h_lower_right]) else: # build half-length vector from constant size halflen_vec = np.ones(self.n_upper + self.n_lower_left + self.n_lower_right ) * self.halflen halflen_vec[self.n_upper:] *= self.halflen_factor_lower self.halflen_vec = halflen_vec """ Half-lengths [m] for each patch in the fault """ s = self.H * np.tan(self.theta / 2) R = np.array([[np.cos(-self.theta), -np.sin(-self.theta)], [np.sin(-self.theta), np.cos(-self.theta)]]) # upper plate interface upper_right_x1 = np.concatenate([[0], np.cumsum(2*self.halflen_vec[:self.n_upper])]) upper_right_x2 = np.zeros_like(upper_right_x1) upper_right = R @ np.stack([upper_right_x1, upper_right_x2], axis=0) # lower left plate interface temp = self.halflen_vec[self.n_upper + self.n_lower_left - 1:self.n_upper - 1:-1] lower_left_x1 = -s - np.concatenate([[0], np.cumsum(2*temp)])[::-1] lower_left_x2 = -self.H * np.ones(self.n_lower_left + 1) lower_left = np.stack([lower_left_x1, lower_left_x2], axis=0) # lower right lower_right_x1 = np.concatenate([ [0], np.cumsum(2*self.halflen_vec[self.n_upper + self.n_lower_left:])]) lower_right_x2 = 
np.zeros_like(lower_right_x1) lower_right = (R @ np.stack([lower_right_x1, lower_right_x2], axis=0) - np.array([[s], [self.H]])) # concatenate mesh parts self.end_upper = upper_right """ 2-element coordinates of upper fault patch endpoints [m] """ self.end_lower = np.concatenate([lower_left, lower_right[:, 1:]], axis=1) """ 2-element coordinates of lower fault patch endpoints [m] """ self.end = np.concatenate([self.end_upper, self.end_lower], axis=1) """ 2-element coordinates of fault patch endpoints [m] """ self.mid = np.concatenate([upper_right[:, :-1] + upper_right[:, 1:], lower_left[:, :-1] + lower_left[:, 1:], lower_right[:, :-1] + lower_right[:, 1:]], axis=1) / 2 """ 2-element coordinates of fault patch midpoints [m] """ self.mid_x1 = self.mid[0, :] """ :math:`x_1` coordinates of fault patch midpoints [m] """ self.mid_x2 = self.mid[1, :] """ :math:`x_2` coordinates of fault patch midpoints [m] """ # access subparts self.ix_upper = np.arange(self.mid_x1.size) < upper_right_x1.size """ Mask of upper fault interface patches """ self.ix_lower = ~self.ix_upper """ Mask of lower fault interface patches (if existing) """ # locked is the part that slips coseismically on the upper plate interface self.x1_lock = self.D_lock / np.tan(self.theta) """ Theoretical surface location [m] of end of locked interface """ ix_locked = self.mid_x1 <= self.x1_lock - self.halflen ix_locked[self.n_upper:] = False self.ix_locked = ix_locked """ Mask of fault patches that are locked interseismically """ self.n_locked = (self.ix_locked).sum() """ Number [-] of locked patches """ # assert self.n_locked == n_lock self.n_creeping = (~self.ix_locked).sum() """ Number [-] of creeping patches """ self.n_creeping_upper = (~self.ix_locked[:self.n_upper]).sum() """ Number [-] of creeping patches in the upper fault interface """ # assert self.n_creeping_upper == n_creep_up self.n_creeping_lower = self.n_creeping - self.n_creeping_upper """ Number [-] of creeping patches in the lower fault interface """ assert self.n_creeping_lower == n_lower_left + n_lower_right self.mid_x1_locked = self.mid_x1[self.ix_locked] """ :math:`x_1` coordinates of locked fault patch midpoints [m] """ self.mid_x2_locked = self.mid_x2[self.ix_locked] """ :math:`x_2` coordinates of locked fault patch midpoints [m] """ self.mid_x1_creeping = self.mid_x1[~self.ix_locked] """ :math:`x_1` coordinates of creeping fault patch midpoints [m] """ self.mid_x2_creeping = self.mid_x2[~self.ix_locked] """ :math:`x_2` coordinates of creeping fault patch midpoints [m] """ # for later calculations, need theta and unit vectors in vector form theta_vec = np.ones_like(self.mid_x1) * self.theta theta_vec[self.n_upper:self.n_upper + self.n_lower_left] = np.pi theta_vec[self.n_upper + self.n_lower_left:] += np.pi self.theta_vec = theta_vec """ Plate dip angle [rad] for all fault patches """ self.e_f = np.stack([np.sin(self.theta_vec), np.cos(self.theta_vec)], axis=0) """ Unit vectors [-] normal to fault patches""" self.e_s = np.stack([-np.cos(self.theta_vec), np.sin(self.theta_vec)], axis=0) """ Unit vectors [-] in fault patch slip direction """ # get external (from the locked to the creeping patches) stress kernel K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping, self.mid_x1_locked, self.mid_x2_locked, self.halflen_vec[self.ix_locked], self.theta_vec[self.ix_locked], self.nu, self.E )[:, :self.n_locked] Kx1x1 = K[:self.n_creeping, :] Kx2x2 = K[self.n_creeping:2*self.n_creeping, :] Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :] K = np.stack([Kx1x1.ravel(), 
Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()] ).reshape(2, 2, self.n_creeping, self.n_locked).transpose(2, 3, 0, 1) self.K_ext = np.einsum("ki,ijkl,li->ij", self.e_s[:, ~self.ix_locked], K, self.e_f[:, ~self.ix_locked], optimize=True) """ External stress kernel [Pa/m] """ # get internal (within creeping patches) stress kernel K = Klinedisp(self.mid_x1_creeping, self.mid_x2_creeping, self.mid_x1_creeping, self.mid_x2_creeping, self.halflen_vec[~self.ix_locked], self.theta_vec[~self.ix_locked], self.nu, self.E )[:, :self.n_creeping] Kx1x1 = K[:self.n_creeping, :] Kx2x2 = K[self.n_creeping:2*self.n_creeping, :] Kx1x2 = K[2*self.n_creeping:3*self.n_creeping, :] K = np.stack([Kx1x1.ravel(), Kx1x2.ravel(), Kx1x2.ravel(), Kx2x2.ravel()] ).reshape(2, 2, self.n_creeping, self.n_creeping).transpose(2, 3, 0, 1) self.K_int = np.einsum("ki,ijkl,li->ij", self.e_s[:, ~self.ix_locked], K, self.e_f[:, ~self.ix_locked], optimize=True) """ Internal stress kernel [Pa/m] """ self.n_state_upper = self.upper_rheo.n_vars * self.n_creeping_upper """ Size [-] of upper plate interface state variable """ self.n_state_lower = (self.lower_rheo.n_vars * self.n_creeping_lower if self.lower_rheo is not None else 2 * self.n_creeping_lower) """ Size [-] of lower plate interface state variable """ if (self.n_creeping_upper == 0) or (self.n_creeping_lower == 0): raise ValueError("Defined geometry results in zero creeping patches in " "either the upper or lower plate interface.") # # if upper rheology is Burgers, tell it our specific shear modulus # if isinstance(self.upper_rheo, rheologies.LinearBurgers): # self.upper_rheo.set_G(self.K_int[:self.n_creeping_upper, :self.n_creeping_upper]) # discretized locking depth self.D_lock_disc = -self.end_upper[1, self.n_locked] """ Discretized locking depth [m] of the upper plate interface """ self.x1_lock_disc = self.D_lock_disc / np.tan(self.theta) """ Discretized surface location [m] of end of locked interface """ class SubductionSimulation(): """ Subduction simulation container class. """ def __init__(self, v_plate, n_cycles_max, n_samples_per_eq, delta_tau_max, v_max, fault, Ds_0, Ds_0_logsigma, T_rec, T_rec_logsigma, D_asp_min, D_asp_max, T_anchor, T_last, enforce_v_plate, largehalflen, t_obs, pts_surf): """ Create a subduction simulation. 
Parameters ---------- v_plate : float Nominal far-field plate velocity, in the dimensions of the rheology n_cycles_max : int Maximum number of cycles to simulate [-] n_samples_per_eq : int Number of internal evaluation timesteps between earthquakes [-] delta_tau_max : float Maximum shear stress change [Pa] from coseismic slip on locked patches v_max : float Maximum slip velocity [m/s] on creeping patches fault : Fault2D Fault object Ds_0 : numpy.ndarray Nominal coseismic left-lateral shearing [m] of the locked fault patch(es) Ds_0_logsigma : numpy.ndarray Standard deviation of the fault slip in logarithmic space T_rec : numpy.ndarray Nominal recurrence time [a] for each earthquake T_rec_logsigma : numpy.ndarray Standard deviation of the recurrence time in logarithmic space D_asp_min : numpy.ndarray Minimum depth [m] for the asperities of each earthquake D_asp_max : numpy.ndarray Maximum depth [m] for the asperities of each earthquake T_anchor : str Anchor date where observations end T_last : list Dates of the last occurence for each earthquake (list of strings) enforce_v_plate : bool Flag whether to allow v_plate to vary or not largehalflen : float Fault patch half-length of the deep crreep patches [m] t_obs : numpy.ndarray, pandas.DatetimeIndex Observation timesteps, either as decimal years relative to the cycle start, or as Timestamps pts_surf : numpy.ndarray Horizontal landward observation coordinates [m] relative to the trench """ # save general sequence & fault parameters self.v_plate = float(v_plate) """ Nominal far-field plate velocity, in the dimensions of the rheology """ self.n_cycles_max = int(n_cycles_max) """ Maximum number of cycles to simulate [-] """ self.n_samples_per_eq = int(n_samples_per_eq) """ Number of internal evaluation timesteps between earthquakes [-] """ self.delta_tau_max = float(delta_tau_max) """ Maximum shear stress change [Pa] from coseismic slip on locked patches """ self.v_max = float(v_max) """ Maximum slip velocity [m/s] on creeping patches """ # define fault assert isinstance(fault, Fault2D) if not (isinstance(fault.upper_rheo, NonlinearViscous) or isinstance(fault.upper_rheo, RateStateSteadyLogarithmic)) or \ not (isinstance(fault.lower_rheo, NonlinearViscous) or (fault.lower_rheo is None)): raise NotImplementedError("SubductionSimulation is only implemented for " "NonlinearViscous or RateStateSteadyLogarithmic " "rheologies in the upper interface, and NonlinearViscous " "rheology in the lower interface.") self.fault = fault """ Fault object """ # cast earthquake slips as NumPy array self.Ds_0 = np.atleast_1d(Ds_0) """ Nominal coseismic left-lateral shearing [m] of the locked fault patch(es) """ self.Ds_0_logsigma = np.atleast_1d(Ds_0_logsigma) """ Standard deviation of the fault slip in logarithmic space """ # load recurrence times self.T_rec = np.atleast_1d(T_rec) """ Nominal recurrence time [a] for each earthquake """ self.T_rec_logsigma = np.atleast_1d(T_rec_logsigma) """ Standard deviation of the recurrence time in logarithmic space """ # load the minimum and maximum depths of the earthquakes self.D_asp_min = np.atleast_1d(D_asp_min) """ Minimum depth [m] for the asperities of each earthquake """ self.D_asp_max = np.atleast_1d(D_asp_max) """ Maximum depth [m] for the asperities of each earthquake """ assert all([D <= self.fault.D_lock for D in self.D_asp_max]), \ f"Asperity depths {self.D_asp_max/1e3} km are deeper than the " \ f"locking depth {self.fault.D_lock/1e3}." 
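# the sequence bookkeeping below stores the anchor and last-event dates, then builds a boolean
# mask that assigns each earthquake to the locked patches whose midpoint depth falls between its
# D_asp_min and D_asp_max; this mask is what distributes the per-event slip Ds_0 onto the asperities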
self.T_anchor = str(T_anchor) """ Anchor date where observations end """ assert isinstance(T_last, list) and all([isinstance(tl, str) for tl in T_last]) self.T_last = T_last """ Dates of the last occurence for each earthquake """ # create a NumPy array that for each locked asperity has the slip per earthquake self.slip_mask = np.logical_and(self.fault.mid_x2_locked.reshape(-1, 1) < -self.D_asp_min.reshape(1, -1), self.fault.mid_x2_locked.reshape(-1, 1) > -self.D_asp_max.reshape(1, -1)) """ Mask that matches each earthquake to a fault patch """ self.T_fullcycle = np.lcm.reduce(self.T_rec) """ Nominal recurrence time [a] for an entire joint earthquake cycle """ self.n_eq = self.Ds_0.size """ Number of distinct earthquakes in sequence """ self.n_eq_per_asp = (self.T_fullcycle / self.T_rec).astype(int) """ Number of earthquakes per asperity and full cycle """ # create realization of the slip amount and earthquake timings rng = np.random.default_rng() # first, create realizations of occurence times # note that this will result in a varying plate velocity rate # (ignore zero-slip earthquakes) self.T_rec_per_asp = [rng.lognormal(np.log(t), s, n) for t, s, n in zip(self.T_rec, self.T_rec_logsigma, self.n_eq_per_asp)] """ Recurrence time [a] realization """ self.Ds_0_per_asp = [rng.lognormal(np.log(d), s, n) if d > 0 else np.array([d] * n) for d, s, n in zip(self.Ds_0, self.Ds_0_logsigma, self.n_eq_per_asp)] """ Fault slip [m] realization """ # sanity check that in each asperity, the nominal plate rate is recovered self.slip_asperities = self.slip_mask.astype(int) * self.Ds_0.reshape(1, -1) """ Slip [m] for each earthquake in each asperity """ v_eff_in_asp = (self.slip_asperities / self.T_rec.reshape(1, -1)).sum(axis=1) assert np.allclose(v_eff_in_asp, self.v_plate * 86400 * 365.25), \ "The nominal plate rate is not recovered in all asperities.\n" \ f"Plate velocity = {self.v_plate * 86400 * 365.25}\n" \ f"Effective velocity in each asperity:\n{v_eff_in_asp}" # second, we need to shift the random realization for each earthquake # individually such that they all yield the same v_plate (enforced or not) # get the effective recurrence time as implied by the T_rec realizations T_fullcycle_per_asp_eff = np.array([sum(t) for t in self.T_rec_per_asp]) # same for the effective cumulative slip Ds_0_fullcycle_per_asp_eff = np.array([sum(d) for d in self.Ds_0_per_asp]) # we need to scale each individual sequence such that it implies the same # recurrence time and cumulative slip in each asperity # (again ignoring zero-slip earthquakes) T_fullcycle_eff_mean = np.mean(T_fullcycle_per_asp_eff) Ds_0_fullcycle_mean = np.ma.masked_equal(Ds_0_fullcycle_per_asp_eff, 0).mean() T_rec_per_asp_adj = [np.array(self.T_rec_per_asp[i]) * T_fullcycle_eff_mean / T_fullcycle_per_asp_eff[i] for i in range(self.n_eq)] Ds_0_per_asp_adj = [np.array(self.Ds_0_per_asp[i]) * Ds_0_fullcycle_mean / Ds_0_fullcycle_per_asp_eff[i] if self.Ds_0[i] > 0 else np.array(self.Ds_0_per_asp[i]) for i in range(self.n_eq)] # now each asperity has the same effective plate velocity, which can be different # from the nominal one - if we want to enforce the nominal plate velocity, # we can rescale the recurrence times again self.enforce_v_plate = bool(enforce_v_plate) """ Flag whether to allow v_plate to vary or not """ ix_nonzero_slip = np.argmax(self.Ds_0 > 0) v_plate_eff = (sum(Ds_0_per_asp_adj[ix_nonzero_slip]) / sum(T_rec_per_asp_adj[ix_nonzero_slip]) / 86400 / 365.25) if self.enforce_v_plate: v_plate_factor = self.v_plate / v_plate_eff for i in 
range(self.n_eq): T_rec_per_asp_adj[i] /= v_plate_factor v_plate_eff = self.v_plate self.v_plate_eff = v_plate_eff """ Effective far-field plate velocity [m/s] """ self.T_eff = sum(T_rec_per_asp_adj[0]) """ Effective length [a] of entire earthquake sequence """ # third, we need to create a list of earthquake dates and associated slips temp_slips = np.vstack([self.slip_mask[:, i].reshape(1, -1) * Ds_0_per_asp_adj[i].reshape(-1, 1) for i in range(self.n_eq)]) year_offsets = [(pd.Period(self.T_anchor, "D") - pd.Period(self.T_last[i], "D") ).n / 365.25 for i in range(self.n_eq)] eq_df_index = np.concatenate( [self.T_eff - (np.cumsum(T_rec_per_asp_adj[i]) - T_rec_per_asp_adj[i] + year_offsets[i]) for i in range(self.n_eq)]) # round the dates to the closest day and combine earthquakes eq_df_index_rounded = np.around(eq_df_index * 365.25) / 365.25 # build a DataFrame with exact and rounded times eq_df = pd.DataFrame(data=temp_slips) eq_df["time"] = eq_df_index eq_df["rounded"] = eq_df_index_rounded # now aggregate by rounded time, keeping the minimum exact time, and summing slip agg_dict = {"time": "min"} agg_dict.update({c: "sum" for c in range(self.fault.n_locked)}) eq_df = eq_df.groupby("rounded").agg(agg_dict) # convert time column to index and sort eq_df.set_index("time", inplace=True) eq_df.sort_index(inplace=True) assert np.allclose(eq_df.sum(axis=0), eq_df.sum(axis=0)[0]) self.eq_df = eq_df """ DataFrame with the dates [decimal year from cycle start] and slips [m] for each asperity """ # fourth, we need to create a list of dates to use internally when evaluating # the earthquake cycle - this is independent of the observation dates i_frac_cumsum = np.concatenate([[self.eq_df.index[-1] - self.T_eff], self.eq_df.index.values]) T_frac = np.diff(i_frac_cumsum) t_eval = np.concatenate( [np.logspace(0, np.log10(1 + T_frac[i]), self.n_samples_per_eq, endpoint=False) - 1 + i_frac_cumsum[i] + j*self.T_eff for j in range(self.n_cycles_max) for i, t in enumerate(T_frac)]) num_neg = (t_eval < 0).sum() t_eval = np.roll(t_eval, -num_neg) t_eval[-num_neg:] += self.n_cycles_max * self.T_eff self.t_eval = np.sort(np.concatenate( [t_eval, np.arange(self.n_cycles_max + 1) * self.T_eff])) """ Internal evaluation timesteps [decimal years since cycle start] """ self.n_eval = self.t_eval.size """ Number of internal evaluation timesteps [-] """ # fifth, for the integration, we need the indices of the timesteps that mark either # an earthquake or the start of a new cycle self.n_slips = self.eq_df.shape[0] """ Number of slips in a sequence [-] """ self.ix_break = [i*(self.n_slips * self.n_samples_per_eq + 1) for i in range(self.n_cycles_max + 1)] """ Indices of breaks between cycles """ self.ix_eq = [self.ix_break[i] + j * self.n_samples_per_eq - num_neg + 1 for i in range(self.n_cycles_max) for j in range(1, 1 + self.n_slips)] """ Indices of earthquakes """ # sixth and last, for the final loop, we need a joint timesteps array between internal # and external (observation) timestamps, such that we can debug, check early stopping, # and restrict the output to the requested timeseries if isinstance(t_obs, pd.DatetimeIndex): t_obs = self.T_eff + (t_obs - pd.Timestamp(self.T_anchor) ).total_seconds().values / 86400 / 365.25 elif isinstance(t_obs, np.ndarray): if np.all(t_obs < 0): # this format is relative to T_anchor and more stable when T_eff varies t_obs = self.T_eff + t_obs assert np.all(t_obs >= 0) and np.all(t_obs < self.T_eff), \ f"Range of 't_obs' ({t_obs.min()}-{t_obs.max():} years) outside of " \ f"the 
earthquake cycle period ({self.T_eff:} years)." else: raise ValueError("Unknown 't_obs' data type.") self.t_obs = t_obs """ Observation timesteps [decimal years since cycle start] """ # combine all possible timesteps t_obs_shifted = self.t_obs + (self.n_cycles_max - 1) * self.T_eff self.t_eval_joint = np.unique(np.concatenate((self.t_eval, t_obs_shifted))) """ Joint internal evaluation and external observation timesteps [decimal years since cycle start] """ # get indices of each individual subset in the new timesteps array self.ix_break_joint = \ np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_break])) """ Indices of breaks between cycles in joint timesteps """ self.ix_eq_joint = \ np.flatnonzero(np.isin(self.t_eval_joint, self.t_eval[self.ix_eq])) """ Indices of earthquakes in joint timesteps """ self.ix_obs_joint = \ np.flatnonzero(np.isin(self.t_eval_joint, t_obs_shifted)) """ Indices of observation timestamps in joint timesteps """ # get vectors of upper plate rheology parameters if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): # alpha_h self.alpha_h_vec = \ self.fault.upper_rheo.get_param_vectors( -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper]) r""" Depth-variable :math:`(a - b) * \sigma_E` [Pa] of upper plate interface """ elif isinstance(self.fault.upper_rheo, NonlinearViscous): # A, alpha_n, and n alpha_n_vec, n_vec, A_vec = \ self.fault.upper_rheo.get_param_vectors( -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper], self.v_plate) self.alpha_n_vec = alpha_n_vec r""" Depth-variable :math:`\alpha_n` [Pa^n * s/m] of upper plate interface """ self.n_vec = n_vec r""" Depth-variable :math:`n` [-] of upper plate interface """ self.A_vec = A_vec r""" Depth-variable :math:`A ` [Pa * (s/m)^(1/n)] of upper plate interface """ else: raise NotImplementedError # get unbounded delta_tau self.delta_tau_unbounded = self.fault.K_ext @ self.eq_df.values.T """ Unbounded coseismic stress change [Pa] """ # get pseudoinverse of K_int for tapered slip self.K_int_inv_upper = np.linalg.pinv( self.fault.K_int[:self.fault.n_creeping_upper, :self.fault.n_creeping_upper]) """ Inverse of K_int [m/Pa] """ self.delta_tau_max_from_v_max_lower = \ ((self.fault.lower_rheo.alpha_n * self.v_max)**(1 / self.fault.lower_rheo.n) - (self.fault.lower_rheo.alpha_n * self.v_plate)**(1 / self.fault.lower_rheo.n) if self.fault.lower_rheo is not None else np.inf) """ Maximum shear stress change [Pa] in lower plate from capped velocity """ if isinstance(self.fault.upper_rheo, NonlinearViscous): delta_tau_max_from_v_max_upper = \ (self.alpha_n_vec * self.v_max)**(1 / self.n_vec) - \ (self.alpha_n_vec * self.v_plate)**(1 / self.n_vec) elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): delta_tau_max_from_v_max_upper = self.alpha_h_vec * \ (np.log(self.v_max / self.fault.upper_rheo.v_0) - np.log(self.v_plate / self.fault.upper_rheo.v_0)) self.delta_tau_max_from_v_max_upper = delta_tau_max_from_v_max_upper """ Maximum shear stress change [Pa] in upper plate from capped velocity """ self.delta_tau_max_joint_upper = np.fmin(self.delta_tau_max, self.delta_tau_max_from_v_max_upper) """ Joint maximum shear stress change [Pa] allowed in upper plate """ self.delta_tau_max_joint_lower = \ (min(self.delta_tau_max, self.delta_tau_max_from_v_max_lower) if self.fault.lower_rheo is not None else np.inf) """ Joint maximum shear stress change [Pa] allowed in lower plate """ # create tapered slip by making delta_tau linearly increase until delta_tau_max delta_tau_bounded = 
self.delta_tau_unbounded.copy() delta_tau_bounded[:self.fault.n_creeping_upper, :] = \ np.fmin(self.delta_tau_max_joint_upper.reshape(-1, 1), self.delta_tau_unbounded[:self.fault.n_creeping_upper, :]) self.delta_tau_bounded = delta_tau_bounded """ Bounded coseismic stress change [Pa] """ # get the additional slip self.slip_taper = (self.K_int_inv_upper @ (self.delta_tau_bounded - self.delta_tau_unbounded )[:self.fault.n_creeping_upper, :]) # check if the lower plate should have been bounded as well if self.fault.lower_rheo is not None: assert not np.any(np.abs(self.delta_tau_bounded[self.fault.n_creeping_upper:, :]) > self.delta_tau_max_joint_lower), \ ("Maximum stress change delta_tau_bounded " f"{np.max(np.abs(self.delta_tau_bounded)):.2e} Pa in lower interface " f"above delta_tau_max = {self.delta_tau_max_joint_lower:.2e} Pa") self.slip_taper_ts = \ pd.DataFrame(index=self.eq_df.index, data=self.slip_taper.T) \ .cumsum(axis=0).reindex(index=self.t_obs, method="ffill", fill_value=0) """ Timeseries of tapered slip [m] on the upper creeping fault patches """ # need the imagined location and orientation of the deep creep patches self.largehalflen = float(largehalflen) """ Fault patch half-length of the deep crreep patches [m] """ self.mid_deep_x1 = \ np.array([self.fault.mid_x1[self.fault.n_upper - 1] + np.cos(self.fault.theta_vec[self.fault.n_upper - 1]) * self.fault.halflen_vec[self.fault.n_upper - 1] + np.cos(self.fault.theta_vec[self.fault.n_upper - 1]) * self.largehalflen, self.fault.mid_x1[self.fault.n_upper + self.fault.n_lower_left - 1] - self.fault.halflen_vec[self.fault.n_upper + self.fault.n_lower_left - 1] - self.largehalflen, self.fault.mid_x1[-1] + np.cos(self.fault.theta_vec[-1] - np.pi) * self.fault.halflen_vec[-1] + np.cos(self.fault.theta_vec[-1] - np.pi) * self.largehalflen]) """ :math:`x_1` coordinates of deep creep fault patch midpoints [m] """ self.mid_deep_x2 = \ np.array([self.fault.mid_x2[self.fault.n_upper - 1] - np.sin(self.fault.theta_vec[self.fault.n_upper - 1]) * self.fault.halflen_vec[self.fault.n_upper - 1] - np.sin(self.fault.theta_vec[self.fault.n_upper - 1]) * self.largehalflen, self.fault.mid_x2[self.fault.n_upper + self.fault.n_lower_left - 1], self.fault.mid_x2[-1] - np.sin(self.fault.theta_vec[-1] - np.pi) * self.fault.halflen_vec[-1] - np.sin(self.fault.theta_vec[-1] - np.pi) * self.largehalflen]) """ :math:`x_2` coordinates of deep creep fault patch midpoints [m] """ self.theta_vec_deep = \ np.array([self.fault.theta_vec[self.fault.n_upper - 1], np.pi, self.fault.theta_vec[-1]]) """ Plate dip angle [rad] for deep creep fault patches """ # create the Green's matrices self.pts_surf = pts_surf """ :math:`x_1` coordinates of surface observation points [m] """ self.n_stations = self.pts_surf.size """ Number of surface observing stations """ self.G_surf_fault = Glinedisp( self.pts_surf, 0, self.fault.mid_x1, self.fault.mid_x2, self.fault.halflen_vec, self.fault.theta_vec, self.fault.nu )[:, :self.fault.mid_x1.size] """ Green's matrix [-] relating slip on the main fault patches to surface motion """ self.G_surf_deep = Glinedisp( self.pts_surf, 0, self.mid_deep_x1, self.mid_deep_x2, self.largehalflen, self.theta_vec_deep, self.fault.nu)[:, :3] """ Green's matrix [-] relating slip on the deep creep patches to surface motion """ self.G_surf = np.hstack([self.G_surf_fault, self.G_surf_deep]) """ Joint Green's matrix [-] relating slip on the entire ESPM to surface motion """ # calculate the best initial velocity state from the steady state ODE v_plate_vec = 
np.ones(self.fault.n_creeping) * self.v_plate v_plate_vec[self.fault.n_creeping_upper:] *= -1 self.v_plate_vec = v_plate_vec """ Vector with the plate velocity for each creeping patch [m/s] """ # get the initial velocity, taking advantage of the option that there could be a # deep transition zone v_init = v_plate_vec.copy() if self.fault.upper_rheo.deep_transition is not None: ix_deep = np.argmin(np.abs(-self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] - self.fault.upper_rheo.deep_transition - self.fault.upper_rheo.deep_transition_width)) if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): v_init[:ix_deep] = np.linspace(self.v_plate * 1e-6, self.v_plate, num=ix_deep, endpoint=False) elif isinstance(self.fault.upper_rheo, NonlinearViscous): v_init[:ix_deep] = np.linspace(0, self.v_plate, num=ix_deep, endpoint=False) self.v_init = v_init """ Initial velocity in all creeping patches [m/s] """ @property def locked_slip(self): """ Timeseries of slip [m] on the locked patches for observation timespan """ return self.eq_df.cumsum(axis=0) \ .reindex(index=self.t_obs, method="ffill", fill_value=0).values.T @property def deep_creep_slip(self): """ Timeseries of slip [m] on the deep creep patches for observation timestamps """ return (np.tile(self.t_obs.reshape(1, -1), (3, 1)) * np.array([1, -1, -1]).reshape(3, 1) * self.v_plate_eff * 86400 * 365.25) @staticmethod def read_config_file(config_file): """ Read a configuration file and return it as a parsed dictionary. Parameters ---------- config_file : str Path to INI configuration file. Returns ------- cfg_dict : dict Parsed configuration file. """ # load configuration file cfg = configparser.ConfigParser() cfg.optionxform = str with open(config_file, mode="rt") as f: cfg.read_file(f) cfg_seq, cfg_fault, cfg_mesh = cfg["sequence"], cfg["fault"], cfg["mesh"] # parse rheologies upper_rheo_dict = dict(cfg["upper_rheo"]) upper_rheo_type = upper_rheo_dict.pop("type") upper_rheo_kw_args = {k: float(v) for k, v in upper_rheo_dict.items()} try: lower_rheo_dict = dict(cfg["lower_rheo"]) except KeyError: lower_rheo_type = None lower_rheo_kw_args = None else: lower_rheo_type = lower_rheo_dict.pop("type") lower_rheo_kw_args = {k: float(v) for k, v in lower_rheo_dict.items()} # parse everything else cfg_dict = { "theta": np.deg2rad(cfg_fault.getfloat("theta_deg")), "D_lock": cfg_fault.getfloat("D_lock"), "H": cfg_fault.getfloat("H"), "nu": cfg_fault.getfloat("nu"), "E": cfg_fault.getfloat("E"), "v_s": cfg_fault.getfloat("v_s"), "halflen": cfg_mesh.getfloat("halflen"), "n_upper": cfg_mesh.getint("n_up"), "n_lower_left": cfg_mesh.getint("n_low_l"), "n_lower_right": cfg_mesh.getint("n_low_r"), "halflen_factor_lower": cfg_mesh.getfloat("halflen_factor_lower"), "D_max": cfg_mesh.getfloat("D_max", fallback=None), "x1_pretrench": cfg_mesh.getfloat("x1_pretrench", fallback=None), "v_plate": cfg_seq.getfloat("v_plate"), "n_cycles_max": cfg_seq.getint("n_cycles_max"), "n_samples_per_eq": cfg_seq.getint("n_samples_per_eq"), "delta_tau_max": cfg_fault.getfloat("delta_tau_max", fallback=np.inf), "v_max": cfg_fault.getfloat("v_max", fallback=np.inf), "Ds_0": np.atleast_1d(json.loads(cfg_seq["Ds_0"])), "Ds_0_logsigma": np.atleast_1d(json.loads(cfg_seq["Ds_0_logsigma"])), "T_rec": np.atleast_1d(json.loads(cfg_seq["T_rec"])), "T_rec_logsigma": np.atleast_1d(json.loads(cfg_seq["T_rec_logsigma"])), "D_asp_min": np.atleast_1d(json.loads(cfg_seq["D_asp_min"])), "D_asp_max": np.atleast_1d(json.loads(cfg_seq["D_asp_max"])), "T_anchor": cfg_seq.get("T_anchor"), 
"T_last": json.loads(cfg_seq["T_last"]), "enforce_v_plate": cfg_seq.getboolean("enforce_v_plate"), "largehalflen": cfg_mesh.getfloat("largehalflen"), "upper_rheo_type": upper_rheo_type, "lower_rheo_type": lower_rheo_type, "upper_rheo_kw_args": upper_rheo_kw_args, "lower_rheo_kw_args": lower_rheo_kw_args } return cfg_dict @classmethod def from_config_dict(cls, cfg, t_obs, pts_surf): """ Create a SubductionSimulation object from a configuration dictionary. Parameters ---------- cfg : dict Dictionary containing all parsed elements from the configuration file t_obs : numpy.ndarray, pandas.DatetimeIndex Observation timesteps, either as decimal years relative to the cycle start, or as Timestamps pts_surf : numpy.ndarray Horizontal landward observation coordinates [m] relative to the trench See Also -------- read_config_file : To load a configuration file into a dictionary. """ # create rheology objects upper_rheo = globals()[cfg["upper_rheo_type"]](**cfg["upper_rheo_kw_args"]) if cfg["lower_rheo_type"] is None: lower_rheo = None else: lower_rheo = globals()[cfg["lower_rheo_type"]](**cfg["lower_rheo_kw_args"]) # create fault object fault = Fault2D(theta=cfg["theta"], D_lock=cfg["D_lock"], H=cfg["H"], nu=cfg["nu"], E=cfg["E"], v_s=cfg["v_s"], halflen=cfg["halflen"], upper_rheo=upper_rheo, n_upper=cfg["n_upper"], lower_rheo=lower_rheo, n_lower_left=cfg["n_lower_left"], n_lower_right=cfg["n_lower_right"], halflen_factor_lower=cfg["halflen_factor_lower"], D_max=cfg["D_max"], x1_pretrench=cfg["x1_pretrench"]) # create simulation object return cls(v_plate=cfg["v_plate"], n_cycles_max=cfg["n_cycles_max"], n_samples_per_eq=cfg["n_samples_per_eq"], delta_tau_max=cfg["delta_tau_max"], v_max=cfg["v_max"], fault=fault, Ds_0=cfg["Ds_0"], Ds_0_logsigma=cfg["Ds_0_logsigma"], T_rec=cfg["T_rec"], T_rec_logsigma=cfg["T_rec_logsigma"], D_asp_min=cfg["D_asp_min"], D_asp_max=cfg["D_asp_max"], T_anchor=cfg["T_anchor"], T_last=cfg["T_last"], enforce_v_plate=cfg["enforce_v_plate"], largehalflen=cfg["largehalflen"], t_obs=t_obs, pts_surf=pts_surf) @staticmethod def get_n(alpha_n, alpha_eff, v_eff): r""" Calculate the real linear viscous strength constant from the effective one. Parameters ---------- alpha_n : float Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] alpha_eff : float Effective linear viscous strength constant [Pa * s/m] v_eff : float Effective velocity [m/s] used for ``alpha_eff`` conversions Returns ------- n : float Power-law exponent :math:`n` [-] """ return (np.log(alpha_n) + np.log(v_eff)) / (np.log(alpha_eff) + np.log(v_eff)) @staticmethod def get_alpha_n(alpha_eff, n, v_eff): r""" Calculate the real linear viscous strength constant from the effective one. Parameters ---------- alpha_eff : float Effective linear viscous strength constant [Pa * s/m] n : float Power-law exponent :math:`n` [-] v_eff : float Effective velocity [m/s] used for ``alpha_eff`` conversions Returns ------- alpha_n : float Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] """ alpha_n = alpha_eff**n * v_eff**(n-1) return alpha_n @staticmethod def get_alpha_eff(alpha_n, n, v_eff): r""" Calculate the effective linear viscous strength constant from the real one. 
Parameters ---------- alpha_n : float Nonlinear viscous rheology strength constant :math:`\alpha_n` [Pa^n * s/m] n : float Power-law exponent :math:`n` [-] v_eff : float Effective velocity [m/s] used for ``alpha_eff`` conversions Returns ------- alpha_eff : float Effective linear viscous strength constant [Pa * s/m] """ if isinstance(v_eff, np.ndarray): temp = v_eff.copy() temp[temp == 0] = np.NaN else: temp = v_eff alpha_eff = alpha_n**(1/n) * temp**((1-n)/n) return alpha_eff @staticmethod def get_alpha_eff_from_alpha_h(alpha_h, v_eff): r""" Calculate the effective viscosity from the rate-dependent friction. Parameters ---------- alpha_h : float Rate-and-state parameter :math:`(a - b) * \sigma_E`, where :math:`a` and :math:`b` [-] are the rate-and-state frictional properties, and :math:`\sigma_E` [Pa] is effective fault normal stress. v_eff : float Effective velocity [m/s] used for ``alpha_eff`` conversions Returns ------- alpha_eff : float Effective linear viscous strength constant [Pa * s/m] """ if isinstance(v_eff, np.ndarray): temp = v_eff.copy() temp[temp == 0] = np.NaN else: temp = v_eff alpha_eff = alpha_h / temp return alpha_eff def run(self, simple_rk4=False): """ Run a full simulation. """ # run forward integration if self.fault.lower_rheo is None: if isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): full_state = flat_run_rdlog( self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint, self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int, self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper, self.delta_tau_bounded, self.fault.upper_rheo.v_0, self.alpha_h_vec, self.fault.mu_over_2vs) elif isinstance(self.fault.upper_rheo, NonlinearViscous): full_state = flat_run_plvis( self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint, self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int, self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper, self.delta_tau_bounded, self.alpha_n_vec, self.n_vec, self.A_vec, self.fault.mu_over_2vs) else: raise NotImplementedError elif isinstance(self.fault.lower_rheo, NonlinearViscous): if isinstance(self.fault.upper_rheo, NonlinearViscous): full_state = flat_run_plvis_plvis( self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint, self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int, self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper, self.delta_tau_bounded, self.fault.upper_rheo.alpha_n, self.fault.upper_rheo.n, self.fault.lower_rheo.alpha_n, self.fault.lower_rheo.n, simple_rk4) elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): full_state = flat_run_rdlog_plvis( self.t_eval_joint * 86400 * 365.25, self.ix_break_joint, self.ix_eq_joint, self.fault.n_creeping_upper, self.fault.n_creeping_lower, self.fault.K_int, self.fault.K_ext, self.v_plate_vec, self.v_init, self.slip_taper, self.delta_tau_bounded, self.fault.upper_rheo.v_0, self.fault.upper_rheo.alpha_h, self.fault.lower_rheo.alpha_n, self.fault.lower_rheo.n, simple_rk4) else: raise NotImplementedError else: raise NotImplementedError # extract the observations that were actually requested obs_state = full_state[:, self.ix_obs_joint].copy() # since we're only calculating transient surface displacements, need to # remove the tapered slip due to bounded stresses obs_state[:self.fault.n_creeping_upper, :] -= self.slip_taper_ts.values.T # convert to surface displacements surf_disps = get_surface_displacements_plvis_plvis( 
obs_state, self.fault.n_creeping_upper, self.fault.n_creeping_lower, np.ascontiguousarray(self.G_surf[:, self.fault.n_locked:]), self.deep_creep_slip) return full_state, obs_state, surf_disps def zero_obs_at_eq(self, surf_disps): """ Reset to zero the surface displacement timeseries every time an earthquake happens. """ obs_zeroed = surf_disps.copy() slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index, self.t_obs.max() > self.eq_df.index) n_slips_obs = slips_obs.sum() if n_slips_obs == 0: obs_zeroed -= obs_zeroed[:, 0].reshape(-1, 1) else: i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq in self.eq_df.index.values[slips_obs]] obs_zeroed[:, :i_slips_obs[0]] -= obs_zeroed[:, i_slips_obs[0] - 1].reshape(-1, 1) obs_zeroed[:, i_slips_obs[0]:] -= obs_zeroed[:, i_slips_obs[0]].reshape(-1, 1) for i in range(1, n_slips_obs): obs_zeroed[:, i_slips_obs[i]:] -= obs_zeroed[:, i_slips_obs[i]].reshape(-1, 1) return obs_zeroed def _reduce_full_state(self, data): # get all NaN columns cols_all_nan = np.all(np.isnan(data), axis=0) # check if there was early stopping if cols_all_nan.sum() > 0: # get the border indices where integrations have been skipped ix_last, ix_first = np.flatnonzero(cols_all_nan)[[0, -1]] ix_last -= 1 ix_first += 1 # get indices before and after the NaN period ix_valid = np.r_[0:ix_last, ix_first:self.t_eval_joint.size] # subset data data = data[:, ix_valid] t_sub = self.t_eval_joint[ix_valid].copy() t_sub[ix_last:] -= self.t_eval_joint[ix_first] - self.t_eval_joint[ix_last] n_cyc_completed = int(np.round(self.t_eval_joint[ix_last] / self.T_eff)) + 1 else: t_sub = self.t_eval_joint.copy() n_cyc_completed = self.n_cycles_max + 1 # done return data, t_sub, n_cyc_completed def plot_surface_displacements(self, obs_zeroed, obs_noisy=None): """ Plot the observers' surface displacement timeseries. Parameters ---------- obs_zeroed : numpy.ndarray Surface displacements as output by :meth:`~zero_obs_at_eq`. obs_noisy : numpy.ndarray, optional Noisy surface observations. Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt # some helper variables isort = np.argsort(self.pts_surf) i_off = 3 * np.std(obs_zeroed.ravel()) # get float dates of observed earthquakes slips_obs = np.logical_and(self.t_obs.min() <= self.eq_df.index, self.t_obs.max() > self.eq_df.index) n_slips_obs = slips_obs.sum() if n_slips_obs > 0: i_slips_obs = [np.argmax(self.t_obs >= t_eq) for t_eq in self.eq_df.index.values[slips_obs]] t_last_slips = [self.t_obs[islip] for islip in i_slips_obs] else: t_last_slips = [] # start plot fig, ax = plt.subplots(nrows=2, sharex=True, layout="constrained") for tslip in t_last_slips: ax[0].axvline(tslip, c="0.7", zorder=-1) ax[1].axvline(tslip, c="0.7", zorder=-1) for i, ix in enumerate(isort): if obs_noisy is not None: ax[0].plot(self.t_obs, obs_noisy[ix, :] + i*i_off, ".", c="k", rasterized=True) ax[1].plot(self.t_obs, obs_noisy[ix + self.n_stations, :] + i*i_off, ".", c="k", rasterized=True) ax[0].plot(self.t_obs, obs_zeroed[ix, :] + i*i_off, c=f"C{i}") ax[1].plot(self.t_obs, obs_zeroed[ix + self.n_stations, :] + i*i_off, c=f"C{i}") ax[1].set_xlabel("Time") ax[0].set_ylabel("Horizontal [m]") ax[1].set_ylabel("Vertical [m]") fig.suptitle("Surface Displacement") return fig, ax def plot_fault_velocities(self, full_state): """ Plot the velocities on all creeping fault patches. Parameters ---------- full_state : numpy.ndarray State matrix as output from :meth:`~run`. 
Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt from matplotlib.colors import SymLogNorm from cmcrameri import cm # extract velocities vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper, self.fault.n_state_upper + self.fault.n_creeping_lower: self.fault.n_state_upper + self.fault.n_state_lower], :] / self.v_plate # check whether the simulation spun up, and NaN data needs to be skipped vels, t_sub, n_cyc_completed = self._reduce_full_state(vels) # normalize time t_sub /= self.T_eff # prepare plot norm = SymLogNorm(linthresh=1, vmin=-1, vmax=100) if self.fault.lower_rheo is None: fig, ax = plt.subplots(figsize=(10, 5), layout="constrained") ax = [ax] else: fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(10, 5), layout="constrained") # plot velocities c = ax[0].pcolormesh(t_sub, self.fault.end_upper[0, self.fault.n_locked:] / 1e3, vels[:self.fault.n_creeping_upper, :-1], norm=norm, cmap=cm.vik, shading="flat") ax[0].set_yticks(self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3) # add vertical lines for cycle breaks for n in range(1, n_cyc_completed): ax[0].axvline(n, c="k", lw=1) # make the y-axis increasing downwards to mimic depth even though we're plotting x1 ax[0].invert_yaxis() # repeat for lower interface, if simulated if self.fault.lower_rheo is not None: c = ax[1].pcolormesh(t_sub, self.fault.end_lower[0, :] / 1e3, -vels[self.fault.n_creeping_upper:, :-1], norm=norm, cmap=cm.vik, shading="flat") ax[1].set_yticks(self.fault.end_lower[0, [0, -1]] / 1e3) # add horizontal lines to show where the lower interface is below the locked zone ax[1].axhline(0, c="k", lw=1) ax[1].axhline(self.fault.x1_lock / 1e3, c="k", lw=1) for n in range(1, n_cyc_completed): ax[1].axvline(n, c="k", lw=1) ax[1].invert_yaxis() # finish figure if self.fault.lower_rheo is None: ax[0].set_ylabel("Upper Interface\n$x_1$ [km]") ax[0].set_xlabel("Normalized Time $t/T$") else: ax[0].set_ylabel("Upper Interface\n$x_1$ [km]") ax[1].set_ylabel("Lower Interface\n$x_1$ [km]") ax[1].set_xlabel("Normalized Time $t/T$") fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05, label="$v/v_{plate}$") fig.suptitle("Normalized Fault Patch Velocities") return fig, ax def plot_fault_slip(self, full_state, deficit=True, include_locked=True, include_deep=True): """ Plot the cumulative slip (deficit) for the fault patches. Parameters ---------- full_state : numpy.ndarray State matrix as output from :meth:`~run`. deficit : bool, optional If ``True`` (default), remove the plate velocity to plot slip deficit, otherwise keep it included. include_locked : bool, optional If ``True`` (default), also plot the slip on the locked patches. include_deep : bool, optional If ``True`` (default), also plot the slip on the semi-infinite patches at the end of the interfaces. 
Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt from matplotlib.colors import Normalize, SymLogNorm from cmcrameri import cm # extract slip slip = full_state[np.r_[:self.fault.n_creeping_upper, self.fault.n_state_upper: self.fault.n_state_upper + self.fault.n_creeping_lower], :] # check whether the simulation spun up, and NaN data needs to be skipped slip, t_sub, n_cyc_completed = self._reduce_full_state(slip) # normalize to slip per full cycle cum_slip_per_cycle = self.v_plate_eff * self.T_eff * 86400 * 365.25 slip /= cum_slip_per_cycle # add optional slip histories, if desired if include_locked: eq_df_joint = pd.DataFrame( index=(self.eq_df.index.values.reshape(1, -1) + self.T_eff * np.arange(n_cyc_completed).reshape(-1, 1) ).ravel(), data=np.tile(self.eq_df.values, (n_cyc_completed, 1))) locked_slip = eq_df_joint.cumsum(axis=0) \ .reindex(index=t_sub, method="ffill", fill_value=0).values.T locked_slip /= cum_slip_per_cycle if include_deep: deep_creep_slip = (np.tile(t_sub.reshape(1, -1), (3, 1)) * np.array([1, -1, -1]).reshape(3, 1) * self.v_plate_eff * 86400 * 365.25) deep_creep_slip /= cum_slip_per_cycle # remove plate velocity to get slip deficit, if desired if deficit: cmap = cm.vik norm = SymLogNorm(linthresh=1e-2, vmin=-1, vmax=1) slip[:self.fault.n_creeping_upper] -= t_sub.reshape(1, -1) / self.T_eff slip[self.fault.n_creeping_upper:] += t_sub.reshape(1, -1) / self.T_eff slip -= slip[:, -2].reshape(-1, 1) if include_locked: locked_slip -= t_sub.reshape(1, -1) / self.T_eff if include_deep: deep_creep_slip -= (t_sub.reshape(1, -1) * np.array([1, -1, -1]).reshape(3, 1)) / self.T_eff else: norm = Normalize(vmin=0, vmax=n_cyc_completed) cmap = cm.batlow # normalize time t_sub /= self.T_eff # prepare figure nrows = (1 + int(self.fault.lower_rheo is not None) + int(include_locked) + int(include_deep) * 3) hr_locked = ((self.fault.end_upper[0, self.fault.n_locked] - self.fault.end_upper[0, 0]) / (self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0])) hr_lower = ((self.fault.end_lower[0, -1] - self.fault.end_lower[0, 0]) / (self.fault.end_upper[0, -1] - self.fault.end_upper[0, self.fault.n_locked])) hr = ([hr_locked] * int(include_locked) + [1] + [hr_locked, hr_locked] * int(include_deep) + [hr_lower] * int(self.fault.lower_rheo is not None) + [hr_locked] * int(include_deep)) fig, ax = plt.subplots(nrows=nrows, sharex=True, gridspec_kw={"height_ratios": hr}, figsize=(10, 5), layout="constrained") iax = 0 # plot locked if include_locked: c = ax[iax].pcolormesh(t_sub, self.fault.end_upper[0, :self.fault.n_locked + 1] / 1e3, locked_slip[:, :-1], norm=norm, cmap=cmap, shading="flat") ax[iax].set_ylabel("Locked\n$x_1$ [km]") temp_x1 = self.fault.end_upper[0, [0, self.fault.n_locked]] / 1e3 ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1]) iax += 1 # plot upper creeping c = ax[iax].pcolormesh(t_sub, self.fault.end_upper[0, self.fault.n_locked:] / 1e3, slip[:self.fault.n_creeping_upper, :-1], norm=norm, cmap=cmap, shading="flat") ax[iax].set_ylabel("Creeping\n$x_1$ [km]") temp_x1 = self.fault.end_upper[0, [self.fault.n_locked, -1]] / 1e3 ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1]) iax += 1 # plot end patch on upper interface if include_deep: temp_x1 = np.array([self.fault.end_upper[0, -1], self.mid_deep_x1[0]]) / 1e3 c = ax[iax].pcolormesh(t_sub, temp_x1, deep_creep_slip[0, :-1].reshape(1, -1), norm=norm, cmap=cmap, shading="flat") ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]") ax[iax].set_yticks(temp_x1, 
[f"{temp_x1[0]:.0f}", "$-\\infty$"]) iax += 1 # plot left end patch on lower interface if include_deep: temp_x1 = np.array([self.mid_deep_x1[1], self.fault.end_lower[0, 0]]) / 1e3 c = ax[iax].pcolormesh(t_sub, temp_x1, -deep_creep_slip[1, :-1].reshape(1, -1), norm=norm, cmap=cmap, shading="flat") ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]") ax[iax].set_yticks(temp_x1, ["$-\\infty$", f"{temp_x1[1]:.0f}"]) iax += 1 # plot lower creeping if self.fault.lower_rheo is not None: c = ax[iax].pcolormesh(t_sub, self.fault.end_lower[0, :] / 1e3, -slip[self.fault.n_creeping_upper:, :-1], norm=norm, cmap=cmap, shading="flat") ax[iax].axhline(0, c="k", lw=1) ax[iax].axhline(self.fault.x1_lock / 1e3, c="k", lw=1) ax[iax].set_ylabel("Creeping\n$x_1$ [km]") temp_x1 = self.fault.end_lower[0, [0, -1]] / 1e3 ax[iax].set_yticks(temp_x1, [f"{x:.0f}" for x in temp_x1]) iax += 1 # plot right end patch on lower interface if include_deep: temp_x1 = np.array([self.fault.end_lower[0, -1], self.mid_deep_x1[2]]) / 1e3 c = ax[iax].pcolormesh(t_sub, temp_x1, -deep_creep_slip[2, :-1].reshape(1, -1), norm=norm, cmap=cmap, shading="flat") ax[iax].set_ylabel("Deep Creep\n$x_1$ [km]") ax[iax].set_yticks(temp_x1, [f"{temp_x1[0]:.0f}", "$-\\infty$"]) iax += 1 # finish figure for iax in range(len(ax)): for n in range(1, n_cyc_completed): ax[iax].axvline(n, c="k", lw=1) ax[iax].invert_yaxis() ax[-1].set_xlabel("Normalized Time $t/T$") fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05, label="$(s - t*v_{plate})/s_{full}$" if deficit else "$s/s_{full}$") suptitle = "Normalized Fault Patch Slip" if deficit: suptitle += " Deficit" fig.suptitle(suptitle) return fig, ax def plot_eq_velocities(self, full_state): """ Plot the before and after velocities on all creeping fault patches for each distinct earthquake. Parameters ---------- full_state : numpy.ndarray State matrix as output from :meth:`~run`. Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt # get indices of each last earthquake in last cycle temp = self.eq_df.astype(bool).drop_duplicates(keep="last") time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]) if np.any(tdiff > 0): warn("Couldn't find exact indices, using time differences of " f"{tdiff * 365.25 * 86400} seconds.") ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last] n_eq_found = len(ix_eq_last) assert n_eq_found == (self.Ds_0 > 0).sum(), \ "Couldn't find indices of each last non-zero earthquake in the " \ "last cycle, check for rounding errors." 
# calculate average slip for plotted earthquakes slip_last = self.eq_df.loc[temp.index, :] slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean() for ieq in range(n_eq_found)] # extract velocities vels = full_state[np.r_[self.fault.n_creeping_upper:self.fault.n_state_upper, self.fault.n_state_upper + self.fault.n_creeping_lower: self.fault.n_state_upper + self.fault.n_state_lower], :] / self.v_plate # prepare plot fig, ax = plt.subplots(nrows=n_eq_found, ncols=1 if self.fault.lower_rheo is None else 2, sharey=True, layout="constrained") ax = np.asarray(ax).reshape(n_eq_found, -1) # loop over earthquakes for irow, ieq in enumerate(ix_eq_last): # repeat plot for before and after for ioff, label in enumerate(["before", "after"]): ax[irow, 0].set_yscale("symlog", linthresh=1) ax[irow, 0].plot(self.fault.mid_x1_creeping[:self.fault.n_creeping_upper] / 1e3, vels[:self.fault.n_creeping_upper, ieq - 1 + ioff], c=f"C{ioff}", label=label) if self.fault.lower_rheo is not None: ax[irow, 1].set_yscale("symlog", linthresh=1) ax[irow, 1].plot( self.fault.mid_x1_creeping[self.fault.n_creeping_upper:] / 1e3, -vels[self.fault.n_creeping_upper:, ieq - 1 + ioff], c=f"C{ioff}", label=label) # finish plot for irow in range(n_eq_found): ax[irow, 0].set_title(f"Upper Interface: $s={slip_avg[irow]:.2g}$ m") ax[irow, 0].legend() ax[irow, 0].set_xlabel("$x_1$ [km]") ax[irow, 0].set_ylabel("$v/v_{plate}$") if self.fault.lower_rheo is not None: ax[irow, 1].set_title(f"Lower Interface: $s={slip_avg[irow]:.2g}$ m") ax[irow, 1].axvline(0, c="k", lw=1) ax[irow, 1].axvline(self.fault.x1_lock / 1e3, c="k", lw=1) ax[irow, 1].tick_params(labelleft=True) ax[irow, 1].legend() ax[irow, 1].set_xlabel("$x_1$ [km]") ax[irow, 1].set_ylabel("$v/v_{plate}$") fig.suptitle("Normalized Earthquake Velocity Changes") return fig, ax def plot_fault(self): """ Plot the fault. Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(10, 3), layout="constrained") ax.plot(self.fault.end_upper[0, :self.fault.n_locked + 1]/1e3, self.fault.end_upper[1, :self.fault.n_locked + 1]/1e3, marker="|", markeredgecolor="k", label="Locked") ax.plot(self.fault.end_upper[0, self.fault.n_locked:]/1e3, self.fault.end_upper[1, self.fault.n_locked:]/1e3, marker="|", markeredgecolor="k", label="Upper Creeping") ax.plot(self.fault.end_lower[0, :]/1e3, self.fault.end_lower[1, :]/1e3, marker="|", markeredgecolor="k", label="Lower Creeping") ax.plot(self.pts_surf / 1e3, np.zeros_like(self.pts_surf), "^", markeredgecolor="none", markerfacecolor="k", label="Observers") ax.axhline(0, lw=1, c="0.5", zorder=-1) ax.legend() ax.set_xlabel("$x_1$ [km]") ax.set_ylabel("$x_2$ [km]") ax.set_title("Fault Mesh and Observer Locations") ax.set_aspect("equal") return fig, ax def plot_slip_phases(self, full_state, post_inter_transition=0.01, normalize=True): """ Plot the cumulative slip on the fault for the three different phases (coseismic, early postseismic, and interseismic). Only works if there is a single earthquake in the sequence. Parameters ---------- full_state : numpy.ndarray State matrix as output from :meth:`~run`. post_inter_transition : float, optional Fraction of the recurrence time that should be considered early postseismic and not interseismic. 
Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt from scipy.interpolate import interp1d # check that the sequence only has one earthquake if not self.n_eq == 1: raise NotImplementedError("Don't know how to plot slip phases if " "multiple earthquakes are present in the sequence.") # get coseismic slip co = np.concatenate([self.eq_df.values.ravel(), self.slip_taper.ravel()]) # get index of last earthquake in last cycle time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0] - self.ix_break_joint[-2]) # reorganize interseismic slip slip = full_state[:self.fault.n_creeping_upper, self.ix_break_joint[-2]:] slip_pre = slip[:, :ix_eq_last] slip_post = slip[:, ix_eq_last:] slip_pre += (slip_post[:, -1] - slip_pre[:, 0]).reshape(-1, 1) slip_joint = np.hstack([slip_post, slip_pre]) slip_joint -= slip_joint[:, 0].reshape(-1, 1) # same for time t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy() t_last_pre = t_last[:ix_eq_last] t_last_post = t_last[ix_eq_last:] t_last_pre += t_last_post[-1] - t_last_pre[0] t_last_joint = np.concatenate([t_last_post, t_last_pre]) t_last_joint -= t_last_joint[0] # since slip_joint is now already cumulative slip since the earthquake, # with the tapered slip removed, we can just read out the early # postseismic and rest interseismic cumulative slip distributions post = interp1d(t_last_joint, slip_joint)(post_inter_transition * self.T_eff) inter = slip_joint[:, -1] - post post = np.concatenate([np.zeros(self.fault.n_locked), post]) inter = np.concatenate([np.zeros(self.fault.n_locked), inter]) # optionally, normalize by total expected cumulative slip over the entire cycle if normalize: total_slip = self.T_eff * self.v_plate * 86400 * 365.25 co /= total_slip post /= total_slip inter /= total_slip # make figure fig, ax = plt.subplots(layout="constrained") ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, co, label="Coseismic") ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, post, label="Postseismic") ax.plot(self.fault.mid_x1[:self.fault.n_upper] / 1e3, inter, label="Interseismic") ax.legend() ax.set_xlabel("$x_1$ [km]") ax.set_ylabel("Normalized cumulative slip [-]" if normalize else "Cumulative Slip [m]") ax.set_title("Slip Phases (Post-/Interseismic cutoff at " f"{post_inter_transition:.1%} " "$T_{rec}$)") return fig, ax def plot_viscosity(self, full_state, return_viscosities=False): """ Plot the viscosity structure with depth for the steady state, as well as for the immediate pre- and coseismic velocities. For multiple earthquakes, it will use the minimum preseismic and maximum postseismic velocities. Parameters ---------- full_state : numpy.ndarray State matrix as output from :meth:`~run`. return_viscosities : bool, optional Also return the preseismic, steady-state, and postseismic viscosities. 
Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt # get indices of each last earthquake in last cycle temp = self.eq_df.astype(bool).drop_duplicates(keep="last") time_eq_last = temp.index.values + (self.n_cycles_max - 1) * self.T_eff tdiff = np.array([np.min(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last]) if np.any(tdiff > 0): warn("Couldn't find exact indices, using time differences of " f"{tdiff * 365.25 * 86400} seconds.") ix_eq_last = [np.argmin(np.abs(self.t_eval_joint - tlast)) for tlast in time_eq_last] n_eq_found = len(ix_eq_last) assert n_eq_found == (self.Ds_0 > 0).sum(), \ "Couldn't find indices of each last non-zero earthquake in the " \ "last cycle, check for rounding errors." # calculate average slip for plotted earthquakes slip_last = self.eq_df.loc[temp.index, :] slip_avg = [slip_last.iloc[ieq, np.flatnonzero(temp.iloc[ieq, :])].mean() for ieq in range(n_eq_found)] # extract preseismic velocities vels_pre = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper, ix - 1] for ix in ix_eq_last]).T vels_post = np.array([full_state[self.fault.n_creeping_upper:self.fault.n_state_upper, ix] for ix in ix_eq_last]).T if isinstance(self.fault.upper_rheo, NonlinearViscous): # calculate viscosity profiles vis_pre = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1), self.n_vec.reshape(-1, 1), vels_pre) vis_ss = SubductionSimulation.get_alpha_eff(self.alpha_n_vec, self.n_vec, self.v_plate_eff) vis_post = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1), self.n_vec.reshape(-1, 1), vels_post) elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): vis_pre = SubductionSimulation.get_alpha_eff_from_alpha_h( self.alpha_h_vec.reshape(-1, 1), vels_pre) vis_ss = SubductionSimulation.get_alpha_eff_from_alpha_h( self.alpha_h_vec.reshape(-1, 1), self.v_plate_eff) vis_post = SubductionSimulation.get_alpha_eff_from_alpha_h( self.alpha_h_vec.reshape(-1, 1), vels_post) else: raise NotImplementedError() vis_mins = 10**np.floor(np.log10(np.ma.masked_invalid(vis_post*0.999).min(axis=0))) vis_maxs = 10**np.ceil(np.log10(np.ma.masked_invalid(vis_pre*1.001).max(axis=0))) # make plot fig, ax = plt.subplots(ncols=n_eq_found, sharey=True, layout="constrained") ax = np.atleast_1d(ax) ax[0].set_ylabel("$x_2$ [km]") for i in range(n_eq_found): ax[i].fill_betweenx([0, self.fault.mid_x2_creeping[1] / 1e3], vis_mins[i], vis_maxs[i], facecolor="0.8", label="Locked") ax[i].fill_betweenx(self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3, vis_pre[:, i], vis_post[:, i], alpha=0.5, label="Simulated") ax[i].plot(vis_ss, self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] / 1e3, label="Plate Rate") ax[i].set_xscale("log") ax[i].legend(loc="lower left") ax[i].set_ylim(self.fault.mid_x2_creeping[self.fault.n_creeping_upper - 1] / 1e3, 0) ax[i].set_xlim(vis_mins[i], vis_maxs[i]) ax[i].set_title(f"$s={slip_avg[i]:.2g}$ m") ax[i].set_xlabel(r"$\alpha_{eff}$ [Pa * s/m]") # finish if return_viscosities: return fig, ax, vis_pre, vis_ss, vis_post else: return fig, ax def plot_viscosity_timeseries(self, full_state, return_viscosities=False): """ Plot the viscosity timeseries with depth for the entire last cycle. Parameters ---------- full_state : numpy.ndarray State matrix as output from :meth:`~run`. return_viscosities : bool, optional Also return the viscosity timeseries. 
Returns ------- matplotlib.figure.Figure matplotlib.axes.Axes """ import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from cmcrameri import cm # check that the sequence only has one earthquake if not self.n_eq == 1: raise NotImplementedError("Don't know how to plot viscosity timeseries if " "multiple earthquakes are present in the sequence.") # get index of last earthquake in last cycle time_eq_last = self.eq_df.index[0] + (self.n_cycles_max - 1) * self.T_eff ix_eq_last = (np.flatnonzero(np.isin(self.t_eval_joint, time_eq_last))[0] - self.ix_break_joint[-2]) # reorganize interseismic velocities vels = full_state[self.fault.n_creeping_upper:2*self.fault.n_creeping_upper, self.ix_break_joint[-2]:] vels_pre = vels[:, :ix_eq_last] vels_post = vels[:, ix_eq_last:] vels = np.hstack([vels_post, vels_pre]) # same for time t_last = self.t_eval_joint[self.ix_break_joint[-2]:].copy() t_last_pre = t_last[:ix_eq_last] t_last_post = t_last[ix_eq_last:] t_last_pre += t_last_post[-1] - t_last_pre[0] t_last_joint = np.concatenate([t_last_post, t_last_pre]) t_last_joint -= t_last_joint[0] # convert velocities to effective viscosity if isinstance(self.fault.upper_rheo, NonlinearViscous): vis_ts = SubductionSimulation.get_alpha_eff(self.alpha_n_vec.reshape(-1, 1), self.n_vec.reshape(-1, 1), vels) elif isinstance(self.fault.upper_rheo, RateStateSteadyLogarithmic): vis_ts = SubductionSimulation.get_alpha_eff_from_alpha_h( self.alpha_h_vec.reshape(-1, 1), vels) else: raise NotImplementedError() # get index of deep transition patch_depths = -self.fault.mid_x2_creeping[:self.fault.n_creeping_upper] ix_deep = np.argmin(np.abs(patch_depths - self.fault.upper_rheo.deep_transition)) # subset vels to skip zero-velocity uppermost patch vis_ts = vis_ts[1:, :] # get percentage of final viscosity rel_vis = vis_ts / vis_ts[:, -1][:, None] rel_vis_masked = np.ma.MaskedArray(rel_vis, np.diff(rel_vis, axis=1, prepend=rel_vis[:, 0][:, None] ) <= 0).filled(np.NaN) levels = [0.2, 0.4, 0.6, 0.8] rel_vis_iquant = np.concatenate([np.nanargmax(rel_vis_masked > lvl, axis=1, keepdims=True) for lvl in levels], axis=1) # normalize time t_sub = t_last_joint / self.T_eff # prepare plot fig, ax = plt.subplots(figsize=(10, 5), layout="constrained") # plot velocities c = ax.pcolormesh( t_sub, np.abs(self.fault.end_upper[1, self.fault.n_locked+1:self.fault.n_locked+ix_deep+1] / 1e3), vis_ts[:ix_deep-1, :-1], norm=LogNorm(vmin=10**np.floor(np.log10(np.median(vis_ts[:ix_deep-1, 0]))), vmax=10**np.ceil(np.log10(np.max(vis_ts[:ix_deep-1, -1])))), cmap=cm.batlow, shading="flat") for i in range(len(levels)): ax.plot(t_sub[rel_vis_iquant[:ix_deep-1, i]], patch_depths[1:ix_deep] / 1e3, color="w") ax.set_xscale("symlog", linthresh=1e-3) ax.set_xlim([0, 1]) # make the y-axis increasing downwards to mimic depth even though we're plotting x1 ax.invert_yaxis() # finish figure ax.set_ylabel("Depth $x_2$ [km]") ax.set_xlabel("Normalized Time $t/T$") fig.colorbar(c, ax=ax, location="right", orientation="vertical", fraction=0.05, label=r"$\alpha_{eff}$") fig.suptitle("Effective Viscosity Timeseries") # finish if return_viscosities: return fig, ax, t_sub, vis_ts else: return fig, ax
Source: tobiscode/seqeas-public, seqeas/subduction2d.py (Python, 145,621 bytes, github-code)
import os
import math
import json

import librosa

from settings import (
    SAMPLE_RATE,
    NUM_MFCC,
    N_FTT,
    HOP_LENGTH,
    NUM_SEGMENTS,
    DURATION,
)

# Loaded using the GTZAN Music Genre Classification dataset at
# https://www.kaggle.com/datasets/andradaolteanu/gtzan-dataset-music-genre-classification
DATASET_PATH = "data\\archive\\Data\\genres_original"
JSON_PATH = "data\\data.json"
SAMPLES_PER_TRACK = SAMPLE_RATE * DURATION


def dump_mfccs_to_json(dataset_path=None):
    """Processes test data as MFCCs and labels."""
    dataset_path = dataset_path if dataset_path is not None else DATASET_PATH
    data = {
        "mapping": [],
        "mfcc": [],
        "labels": [],
    }
    samples_per_segment = int(SAMPLES_PER_TRACK / NUM_SEGMENTS)
    expected_mfcc = math.ceil(samples_per_segment / HOP_LENGTH)

    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
        # skip the dataset root itself; each subfolder is one genre label
        if dirpath is not dataset_path:
            dirpath_components = dirpath.split("\\")
            label = dirpath_components[-1]
            data["mapping"].append(label)
            print(f"Processing: {label}")
            for f in filenames:
                file_path = os.path.join(dirpath, f)
                signal, sr = librosa.load(file_path, sr=SAMPLE_RATE)
                # split each track into equal-length segments and extract MFCCs per segment
                for s in range(NUM_SEGMENTS):
                    start_sample = samples_per_segment * s
                    finish_sample = start_sample + samples_per_segment
                    mfcc = librosa.feature.mfcc(signal[start_sample:finish_sample],
                                                sr=sr,
                                                n_fft=N_FTT,
                                                n_mfcc=NUM_MFCC,
                                                hop_length=HOP_LENGTH)
                    mfcc = mfcc.T
                    # only keep segments with the expected number of MFCC frames
                    if len(mfcc) == expected_mfcc:
                        data["mfcc"].append(mfcc.tolist())
                        data["labels"].append(i - 1)
                        print(f"{file_path}, segment:{s+1}")

    with open(JSON_PATH, "w") as fp:
        json.dump(data, fp, indent=4)


if __name__ == "__main__":
    dump_mfccs_to_json()
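A minimal sketch of how the data.json produced above might be read back for training; it is not part of the original repository, and the json_path default simply mirrors JSON_PATH above.

import json
import numpy as np


def load_dataset(json_path="data\\data.json"):
    # read the MFCC matrix and integer labels written by dump_mfccs_to_json
    with open(json_path, "r") as fp:
        data = json.load(fp)
    X = np.array(data["mfcc"])    # shape: (n_segments, n_frames, NUM_MFCC)
    y = np.array(data["labels"])  # indices into data["mapping"]
    return X, y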
Source: jmrossi98/genre_detect, src/preprocess_data.py (Python, 2,051 bytes, github-code)
from fastapi import Body, FastAPI
from pydantic import BaseModel
from typing import Annotated
from enum import Enum

app = FastAPI()


class ModelName(str, Enum):
    afs = "afs"
    har = "har1"


class Item(BaseModel):
    name: str
    description: str | None = None
    price: float
    tax: float | None = None
    tags: set[str] = set()


fake_items_db = [{"item_name": "Foo"}, {"item_name": "Bar"}, {"item_name": "Baz"}]


@app.post("/items/create_item/")
async def create_items(item: Item):
    item_dict = item.model_dump()
    if item.tax:
        price_with_tax = item.price + item.tax
        item_dict.update({"price with tax": price_with_tax})
    return item_dict


@app.get("/")
async def home():
    return {"Data": "Test"}


@app.get("/items/")
async def read_item(skip: int = 0, limit: int = 10):
    return fake_items_db[skip: skip + limit]


@app.put("/add_items/{item_id}")
async def add_item(item_id: int, item: Item):
    return {"item_id": item_id, **item.model_dump()}


@app.put("/items/{item_id}")
async def update_item(item_id: int,
                      item: Annotated[Item, Body(examples={"name": "foo",
                                                           "description": "cool item",
                                                           "price": "24",
                                                           "tax": 3})]):
    result = {"item_id": item_id, "item": item}
    return result


@app.get("/models/{model_name}")
async def get_model(model_name: ModelName):
    if model_name is ModelName.afs:
        return {"model_name": model_name, "message": 1}
    if model_name.value == "har":
        return {"model_name": model_name, "message": 2}
    return {"model_name": model_name, "message": -1}


@app.get("/files/{file_path:path}")
async def read_file(file_path: str):
    return {"file_path": file_path}
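A minimal sketch of exercising the endpoints above with FastAPI's TestClient; it is not part of the original file, the import assumes the module above is saved as testing.py, and the sample payload values are made up.

from fastapi.testclient import TestClient
from testing import app

client = TestClient(app)

print(client.get("/").json())                       # {"Data": "Test"}
print(client.get("/items/?skip=1&limit=2").json())  # [{"item_name": "Bar"}, {"item_name": "Baz"}]
resp = client.post("/items/create_item/",
                   json={"name": "Widget", "price": 10.0, "tax": 2.0})
print(resp.json())                                  # includes "price with tax": 12.0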
Source: mkilic20/task, testing.py (Python, 1,663 bytes, github-code)
from urllib2 import urlopen, HTTPError from django.template.defaultfilters import slugify from django.core.files.base import ContentFile from django.db import transaction, IntegrityError from item.models import Item, Link from movie.models import Movie, Actor, Director, Genre from decorators.retry import retry class LoadMovie(): """ This manager inserts a movie into the database along with its corresponding genres, actors, and directors. """ exists = False def __init__(self, title, imdb_id, runtime, synopsis, theater_date, keywords): """ Inserts the movie into the database if it doesn't already exist in the database. """ try: self.movie, self.created = Movie.objects.get_or_create( title=title, imdb_id=imdb_id, runtime=runtime, synopsis=synopsis, theater_date=theater_date, keywords = keywords, url=slugify(title) ) except IntegrityError: print('TRANSACTION FAILED ON MOVIE INSERT: Rolling back now...') transaction.rollback() def insert_genres(self, genres): """ Inserts the genres for the movie. """ genre_list = [] try: for g in genres: genre, created = Genre.objects.get_or_create( name=g, url=slugify(g)) genre_list.append(genre) self.movie.genre.add(*genre_list) except IntegrityError: print('TRANSACTION FAILED ON GENRE INSERT: Rolling back now...') transaction.rollback() def insert_actors(self, actors): """ Inserts the actors for the movie. """ actor_list = [] try: for a in actors: actor, created = Actor.objects.get_or_create( name=a, url=slugify(a)) actor_list.append(actor) self.movie.actors.add(*actor_list) except IntegrityError: print('TRANSACTION FAILED ON ACTOR INSERT: Rolling back now...') transaction.rollback() def insert_directors(self, directors): """ Inserts the directors for the movie. """ director_list = [] try: for d in directors: director, created = Director.objects.get_or_create( name=d, url=slugify(d)) director_list.append(director) self.movie.directors.add(*director_list) except IntegrityError: print('TRANSACTION FAILED ON DIRECTOR INSERT: Rolling back now...') transaction.rollback() @retry(HTTPError) def insert_image(self, url): """ Inserts the image for the movie. """ try: if 'default.jpg' in self.movie.image.url or self.created: image = urlopen(url, timeout=15) self.movie.image.save( self.movie.url+u'.jpg', ContentFile(image.read()) ) except IntegrityError: print('TRANSACTION FAILED ON IMAGE INSERT: Rolling back now...') transaction.rollback() def insert_trailer(self, url): """ Inserts the trailer as a link. """ try: Link.objects.get_or_create( item=self.movie.item, partner="YouTube", url=url ) except IntegrityError: print('TRANSACTION FAILED ON TRAILER INSERT: Rolling back now...') transaction.rollback()
Source: sameenjalal/mavenize-beta, mavenize/lib/db/loadmovie.py (Python, 3,712 bytes, github-code)
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 1 10:10:45 2021

@author: 82106
"""

import cv2
import os
import sys

if not os.path.exists('result'):
    os.makedirs('result')

capture = cv2.VideoCapture(1)
if not capture.isOpened():
    print('Camera open failed!')
    sys.exit()

'''
frameWidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frameSize = (frameWidth, frameHeight)
print('frame size : {}'.format(frameSize))
'''

capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

count = 1
while True:
    ret, frame = capture.read()
    if not ret:
        print('Frame read error!')
        sys.exit()

    cv2.imshow('frame', frame)

    key = cv2.waitKey(1)
    if key == ord('s'):
        # save a lossless PNG screenshot of the current frame
        print('Screenshot saved!')
        cv2.imwrite('result/screenshot{}.png'.format(count), frame,
                    params=[cv2.IMWRITE_PNG_COMPRESSION, 0])
        count += 1
    elif key == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
Source: dongwooky/Personal-Project, container/camera_screenshot.py (Python, 1,084 bytes, github-code)
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@AUTHOR:Joselyn Zhao
@CONTACT:[email protected]
@HOME_PAGE:joselynzhao.top
@SOFTWERE:PyCharm
@FILE:main.py
@TIME:2019/6/13 10:32
@DES:
'''

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
from tensorflow.examples.tutorials.mnist import input_data
from Lenet import *
from PIL import Image

mnist = input_data.read_data_sets('../../../data/mnist', one_hot=True)
x_test = np.reshape(mnist.test.images, [-1, 28, 28, 1])
x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
# print("Updated Image Shape: {}".format(X_train[0].shape))
tf.logging.set_verbosity(old_v)

iteratons = 1000
batch_size = 64
ma = 0
sigma = 0.1
lr = 0.01


def get_sample100(label):
    sample100_x = []
    sample100_y = []
    count = 0
    for i in range(len(mnist.test.images)):
        if mnist.test.labels[i][label] == 1:
            count += 1
            sample100_y.append(mnist.test.labels[i])
            sample100_x.append(mnist.test.images[i])
        if count >= 100:
            break
    return sample100_x, sample100_y


def train_lenet(lenet):
    with tf.Session() as sess:  # does this session need to be closed explicitly?
        sess.run(tf.global_variables_initializer())
        tf.summary.image("input", lenet.x, 3)
        merged_summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter("LOGDIR/4/", sess.graph)  # save to a separate log directory
        # writer.add_graph(sess.graph)
        for ii in range(iteratons):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            batch_xs = np.reshape(batch_xs, [-1, 28, 28, 1])
            batch_xs = np.pad(batch_xs, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
            sess.run(lenet.train_step, feed_dict={lenet.x: batch_xs, lenet.y_: batch_ys})
            if ii % 50 == 1:
                acc, s = sess.run([lenet.accuracy, merged_summary],
                                  feed_dict={lenet.x: x_test, lenet.y_: mnist.test.labels})
                writer.add_summary(s, ii)
                print("%5d: accuracy is: %4f" % (ii, acc))

        sample100_x, sample100_y = get_sample100(4)  # arbitrarily picked one label (any value 0-9 works)
        sample100_x = np.reshape(sample100_x, [-1, 28, 28, 1])
        sample100_x = np.pad(sample100_x, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')

        x_min = tf.reduce_min(lenet.fc2)
        x_max = tf.reduce_max(lenet.fc2)
        fc2 = (lenet.fc2 - x_min) / (x_max - x_min)
        fc2 = sess.run(fc2, feed_dict={lenet.x: sample100_x, lenet.y_: sample100_y})
        plt.imshow(fc2)
        plt.show()

        print('[accuracy,loss]:', sess.run([lenet.accuracy],
                                           feed_dict={lenet.x: x_test, lenet.y_: mnist.test.labels}))


if __name__ == "__main__":
    act = "sigmoid"
    lenet = Lenet(ma, sigma, lr, act)
    train_lenet(lenet)
Source: joselynzhao/DeepLearning.Advanceing, DL_6/work/main.py (Python, 2,860 bytes, github-code)
import pytesseract
import PIL
from os import system
import re

system("tesseract -l")


class workout:
    reps = 0
    exercise_name = ""


def compile_text_to_workouts(text):
    workouts = []
    num = 0
    for word in text:
        new_workout = workout()
        if word.isdigit():
            new_workout.reps = word
            num += 1
            while num < len(text) and not text[num].isdigit():
                new_workout.exercise_name += " " + str(text[num])
                num += 1
        if not new_workout.reps == 0 or not new_workout.exercise_name == "":
            workouts.append(new_workout)
    return workouts


####MAIN:###############################################################
letters = (pytesseract.image_to_string(r'../GetFit/workout_routine1.png'))
print(letters)

sentence = re.findall(r'\w+', letters)  ##turns letters into words and makes list
print(sentence)

compile_text_to_workouts(sentence)  ###turns into actual workout routine
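A small sketch, not in the original script, that captures the return value of compile_text_to_workouts and prints each parsed entry; the attribute names follow the workout class above.

routine = compile_text_to_workouts(sentence)
for w in routine:
    # exercise_name is built with a leading space, hence the tight f-string
    print(f"{w.reps} reps of{w.exercise_name}")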
Source: reeyagup/GetFit, image_to_text.py (Python, 972 bytes, github-code)
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
from xml.etree import ElementTree as etree
from xml.dom import minidom
import untangle


def xml_generator(input_filename, input_foldername, exif_list, root_path):
    root = ET.Element('annotation')

    source = ET.SubElement(root, 'source')
    image_date = ET.SubElement(source, 'date')
    image_date.text = str(exif_list[0])
    folder_name = ET.SubElement(source, 'folder')
    folder_name.text = input_foldername
    file_name = ET.SubElement(source, 'filename')
    file_name.text = input_filename

    gpsinfo = ET.SubElement(root, 'gpsinfo')
    gps_altitude = ET.SubElement(gpsinfo, 'GPSAltitude')
    gps_altitude.text = str(exif_list[1])
    gps_latitude = ET.SubElement(gpsinfo, 'GPSLatitude')
    gps_latitude.text = str(exif_list[2])
    gps_latitude_ref = ET.SubElement(gpsinfo, 'GPSLatitudeRef')
    gps_latitude_ref.text = str(exif_list[3])
    gps_longitude = ET.SubElement(gpsinfo, 'GPSLongitude')
    gps_longitude.text = str(exif_list[4])
    gps_longitude_ref = ET.SubElement(gpsinfo, 'GPSLongitudeRef')
    gps_longitude_ref.text = str(exif_list[5])

    '''
    There should be position annotation inside 'object' tag
    '''
    #ann_obj = ET.SubElement(root, 'object')

    xml_string = etree.tostring(root)
    tree = minidom.parseString(xml_string)
    xml_string = tree.toxml()
    save_path = '%s/ob_%s/%s.xml' % (root_path, input_foldername, input_filename[:-4])
    f = open(save_path, 'wb')
    f.write(tree.toprettyxml(encoding='utf-8'))
    f.close()


def xml_parsing(input_xml_file):
    obj = untangle.parse(input_xml_file)
    date_time = obj.annotation.source.date.cdata
    GPSAltitude = obj.annotation.gpsinfo.GPSAltitude.cdata
    GPSLatitude = obj.annotation.gpsinfo.GPSLatitude.cdata
    GPSLatitudeRef = obj.annotation.gpsinfo.GPSLatitudeRef.cdata
    GPSLongitude = obj.annotation.gpsinfo.GPSLongitude.cdata
    GPSLongitudeRef = obj.annotation.gpsinfo.GPSLongitudeRef.cdata
    xml_info_keys = ['Date', 'GPSAltitude', 'GPSLatitude', 'GPSLatitudeRef',
                     'GPSLongitude', 'GPSLongitudeRef']
    xml_info_value = [date_time, GPSAltitude, GPSLatitude, GPSLatitudeRef,
                      GPSLongitude, GPSLongitudeRef]
    xml_info_dict = dict(zip(xml_info_keys, xml_info_value))
    return xml_info_dict


#im = '/Users/xiang/ml_ann/ann_tools_eric/dataset/ob_curr/00001.xml'
#xml_parsing(im)
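A round-trip sketch for the two helpers above, not part of the original module; the folder layout, file name, and EXIF values are hypothetical, and xml_generator expects <root_path>/ob_<folder>/ to exist before it writes.

import os

root_path = 'dataset'   # hypothetical output root
folder = 'curr'         # hypothetical image folder name
os.makedirs(os.path.join(root_path, 'ob_' + folder), exist_ok=True)

# hypothetical EXIF values: [date, altitude, latitude, lat ref, longitude, lon ref]
exif = ['2021:07:01 10:10:45', 35.2, '52.37', 'N', '4.89', 'E']
xml_generator('00001.jpg', folder, exif, root_path)

info = xml_parsing(os.path.join(root_path, 'ob_' + folder, '00001.xml'))
print(info['GPSLatitude'], info['GPSLatitudeRef'])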
Source: simonchanper/ml_ann, ann_tools_eric/xml_process.py (Python, 2,403 bytes, github-code)
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        d = {}
        while headA:
            d[headA] = 1
            headA = headA.next
        while headB:
            if headB in d:
                return headB
            headB = headB.next

# The idea is to traverse list A and store the address/reference to each node
# in a hash set. Then check every node bi in list B: if bi appears in the hash set,
# then bi is the intersection node.
# I did not realize that the hash set can be created like this
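The note above describes the hash-set approach, which uses O(n) extra space. As an illustrative alternative that is not part of the original solution, the usual constant-space variant walks two pointers and switches each one to the other list's head when it runs off the end, so both cover the same total distance.

class SolutionTwoPointer:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        a, b = headA, headB
        # after at most len(A) + len(B) steps the pointers meet at the
        # intersection node, or both become None if the lists never intersect
        while a is not b:
            a = a.next if a else headB
            b = b.next if b else headA
        return a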
Source: Superhzf/python_exercise, Linked List/Intersection of Two Linked Lists/solution.py (Python, 692 bytes, github-code)
# for X in range(1,10):
#     print(X)

# for char in "coffee":
#     print(char * 10)

# for num in range(0,20,2):  # if you start with odd nums it will print odd (1,20,2)
#     print(num)

# times = input("how many times do i have to tell you? ")
# times = int(times)
# for time in range(times):
#     print("clean up your room!")

# for num in range(1,21):
#     if num == 4 or num == 13:
#         print(f"{num} is unlucky")
#     elif num % 2 == 0:
#         print(f"{num} is even")
#     else:
#         print(f"{num} is odd")

# for num in range(1,21):
#     if num == 4 or num == 13:
#         print(f"{num} is unlucky")
#     elif num % 2 == 0:
#         print(f"{num} is even")
#     else:
#         state()
#         print(f"{num} is odd")

# while loop
# msg = input("whats your password?")
# while msg != "bananas":
#     print("wrong!")
#     msg = input("whats your password?")
# print("correct!")

# num = 1
# while num < 11:
#     print(num)
#     num += 1

# for num in range(1,11):
#     print(" \U0001f600" * num)

# times = 1
# while times < 11:
#     print(" \U0001f600" * times)
#     times += 1

# msg = input("say something: ")
# while msg != "stop copying me":
#     print(msg)
#     msg = input()
# print("you win!")

while True:
    command = input("type 'exit' to exit:")
    if (command == "exit"):
        break
    times = int(input("how many times do i have to tell yah?"))
    for time in range(times):
        print("clean your room!")
        if time >= 3:
            print("do you even listen anymore")
            break
Source: mevine/seen, jee.py (Python, 1,511 bytes, github-code)
from __future__ import annotations from io import BufferedIOBase, BytesIO from typing import List, Optional from helper import ( byte_to_int, encode_varstr, hash160, int_to_byte, int_to_little_endian, little_endian_to_int, read_varint, sha256, ) from op import ( decode_num, encode_minimal_num, is_number_op_code, number_to_op_code, op_code_to_number, OP_0, OP_CHECKLOCKTIMEVERIFY, OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_CHECKSEQUENCEVERIFY, OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_DROP, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_FROMALTSTACK, OP_HASH160, OP_IF, OP_NOTIF, OP_PUSHDATA1, OP_PUSHDATA2, OP_TOALTSTACK, OP_VERIFY, OP_CODE_NAMES, OP_CODE_FUNCTIONS, ) from timelock import Locktime, Sequence from witness import Witness class Script(list): def __add__(self, other: Script) -> Script: return self.__class__(super().__add__(other)) def __radd__(self, other: Script) -> Script: o = self.__class__(other) return o + self def __new__(cls, commands: Optional[List[Union(bytes, str)]] = None) -> Script: if commands is None: commands = [] for current in commands: if type(current) not in (bytes, ): raise ValueError( f'Every command should be bytes or str, got {current} instead' ) return super().__new__(cls, commands) def __repr__(self) -> str: result = '' for current in self: if OP_CODE_NAMES.get(current): result += f'{OP_CODE_NAMES[current]} ' elif type(current) == str: result += f'<{current}> ' else: result += f'{current.hex()} ' return result @classmethod def parse(cls, s: BufferedIOBase) -> Script: # get the length of the entire field length = read_varint(s) # initialize the commands array commands = [] # initialize the number of bytes we've read to 0 count = 0 # loop until we've read length bytes while count < length: # get the current byte current = s.read(1) # increment the bytes we've read count += 1 # convert the current byte to an integer current_int = current[0] # if the current byte is between 1 and 75 inclusive if current_int <= 75: # add the next n bytes as a command commands.append(s.read(current_int)) count += current_int elif current == OP_PUSHDATA1: # op_pushdata1 data_length = byte_to_int(s.read(1)) commands.append(s.read(data_length)) count += data_length + 1 elif current == OP_PUSHDATA2: # op_pushdata2 data_length = little_endian_to_int(s.read(2)) commands.append(s.read(data_length)) count += data_length + 2 else: # add the command to the list of commands commands.append(current) if count != length: raise SyntaxError(f'parsing script failed {commands}') return cls(commands) def miniscript(self): from miniscript import MiniScript return MiniScript.from_script(Script(self[:])) def is_locktime_locked(self) -> bool: '''Returns whether the script starts with <locktime> OP_CLTV OP_DROP''' return len(self) >= 3 and \ (is_number_op_code(self[0]) or len(self[0]) > 1) and \ self[1] == OP_CHECKLOCKTIMEVERIFY and self[2] == OP_DROP def is_multisig(self) -> bool: '''Returns whether the script follows the OP_k <pubkey1>...<pubkeyn> OP_n OP_CHECKMULTISIG pattern''' if self[-1] != OP_CHECKMULTISIG: return False if not is_number_op_code(self[-2]): return False n = op_code_to_number(self[-2]) if len(self) < n + 3: return False for current in self[-n - 2:-2]: if len(current) != 33: return False if not is_number_op_code(self[-n - 3]): return False k = op_code_to_number(self[-n - 3]) if k < 1 or k > 15: return False if n < k or n > 15: return False return True def is_multisig_timelock(self) -> bool: '''Returns whether the script follows the <locktime> OP_CLTV/OP_CSV OP_DROP OP_k <pubkey1>...<pubkeyn> OP_n 
OP_CHECKMULTISIG pattern''' return (self.is_sequence_locked() or self.is_locktime_locked()) and \ self.is_multisig() def is_p2pkh(self) -> bool: '''Returns whether the script follows the OP_DUP OP_HASH160 <20 byte hash> OP_EQUALVERIFY OP_CHECKSIG pattern.''' # there should be exactly 5 commands # OP_DUP, OP_HASH160, 20-byte hash, OP_EQUALVERIFY, OP_CHECKSIG return len(self) == 5 and self[0] == OP_DUP and self[1] == OP_HASH160 \ and len(self[2]) == 20 and self[3] == OP_EQUALVERIFY \ and self[4] == OP_CHECKSIG def is_p2sh(self) -> bool: '''Returns whether the script follows the OP_HASH160 <20 byte hash> OP_EQUAL pattern.''' # there should be exactly 3 commands # OP_HASH160, 20-byte hash, OP_EQUAL return len(self) == 3 and self[0] == OP_HASH160 and len(self[1]) == 20 \ and self[2] == OP_EQUAL def is_p2wpkh(self) -> bool: '''Returns whether the script follows the OP_0 <20 byte hash> pattern.''' return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 20 def is_p2wsh(self) -> bool: '''Returns whether the script follows the OP_0 <32 byte hash> pattern.''' return len(self) == 2 and self[0] == OP_0 and len(self[1]) == 32 def is_segwit(self) -> bool: return self.is_p2wpkh() or self.is_p2wsh() def is_sequence_locked(self) -> bool: '''Returns whether the script starts with <sequence> OP_CSV OP_DROP''' return len(self) >= 3 and \ (is_number_op_code(self[0]) or len(self[0]) > 1) and \ self[1] == OP_CHECKSEQUENCEVERIFY and self[2] == OP_DROP def is_timelock(self) -> bool: '''Returns whether the script follows the locktime OP_CLTV OP_DROP <pubkey> OP_CHECKSIG pattern''' return (self.is_sequence_locked() or self.is_locktime_locked()) and \ len(self) == 5 and len(self[3]) == 33 and self[4] == OP_CHECKSIG def pubkeys(self) -> List[bytes]: pubkeys = [] for item in self: if len(item) == 33 and item[0] in (2, 3): pubkeys.append(item) return pubkeys def raw_serialize(self) -> bytes: # initialize what we'll send back result = b'' # go through each command for current in self: if current == OP_0: result += int_to_byte(0) elif OP_CODE_NAMES.get(current) is None: # this is an element # get the length in bytes length = len(current) # for large lengths, we have to use a pushdata op code if length < 75: # turn the length into a single byte integer result += int_to_byte(length) elif length > 75 and length < 0x100: # 76 is pushdata1 result += OP_PUSHDATA1 result += int_to_byte(length) elif length >= 0x100 and length <= 520: # 77 is pushdata2 result += OP_PUSHDATA2 result += int_to_little_endian(length, 2) else: raise ValueError('too long a command') result += current return result def serialize(self) -> bytes: return encode_varstr(self.raw_serialize()) class ScriptPubKey(Script): '''Represents a ScriptPubKey in a transaction''' @classmethod def parse(cls, s: BufferedIOBase) -> ScriptPubKey: script_pubkey = super().parse(s) if script_pubkey.is_p2pkh(): return PKHScriptPubKey.from_hash(script_pubkey[2]) elif script_pubkey.is_p2sh(): return SHScriptPubKey.from_hash(script_pubkey[1]) elif script_pubkey.is_p2wpkh(): return WPKHScriptPubKey.from_hash(script_pubkey[1]) elif script_pubkey.is_p2wsh(): return WSHScriptPubKey.from_hash(script_pubkey[1]) else: return script_pubkey def redeem_script(self) -> RedeemScript: '''Convert this ScriptPubKey to its RedeemScript equivalent''' return RedeemScript(self) class PKHScriptPubKey(ScriptPubKey): @classmethod def from_hash(cls, h160: bytes) -> PKHScriptPubKey: if len(h160) != 20: raise TypeError('h160 should be 20 bytes') return cls([OP_DUP, OP_HASH160, h160, OP_EQUALVERIFY, 
OP_CHECKSIG]) def hash160(self) -> bytes: return self[2] class SHScriptPubKey(ScriptPubKey): @classmethod def from_hash(cls, h160: bytes) -> SHScriptPubKey: if len(h160) != 20: raise TypeError('h160 should be 20 bytes') return cls([OP_HASH160, h160, OP_EQUAL]) def hash160(self) -> bytes: return self[1] class RedeemScript(Script): '''Subclass that represents a RedeemScript for p2sh''' def hash160(self) -> bytes: '''Returns the hash160 of the serialization of the RedeemScript''' return hash160(self.raw_serialize()) def script_pubkey(self) -> SHScriptPubKey: '''Returns the ScriptPubKey that this RedeemScript corresponds to''' return SHScriptPubKey.from_hash(self.hash160()) class SegwitPubKey(ScriptPubKey): def hash(self) -> bytes: return self[1] class WPKHScriptPubKey(SegwitPubKey): @classmethod def from_hash(cls, h160: bytes) -> WPKHScriptPubKey: if len(h160) != 20: raise TypeError('h160 should be 20 bytes') return cls([OP_0, h160]) class WSHScriptPubKey(SegwitPubKey): @classmethod def from_hash(cls, s256: bytes) -> WSHScriptPubKey: if len(s256) != 32: raise TypeError('s256 should be 32 bytes') return cls([OP_0, s256]) class WitnessScript(Script): '''Subclass that represents a WitnessScript for p2wsh''' def redeem_script(self) -> RedeemScript: return self.script_pubkey().redeem_script() def script_pubkey(self) -> WSHScriptPubKey: '''Generates the ScriptPubKey for p2wsh''' # get the sha256 of the current script # return new p2wsh script using p2wsh_script return WSHScriptPubKey.from_hash(self.sha256()) def sha256(self) -> bytes: '''Returns the sha256 of the raw serialization for witness program''' return sha256(self.raw_serialize()) class MultiSigScript(Script): @classmethod def from_pubkeys(cls, k: int, sec_pubkeys: List[bytes]) -> MultiSigScript: n = len(sec_pubkeys) if k == 0 or k > n: raise ValueError(f'cannot do {k} of {n} keys') return cls([ number_to_op_code(k), *sorted(sec_pubkeys), number_to_op_code(n), OP_CHECKMULTISIG ]) class MultiSigRedeemScript(RedeemScript, MultiSigScript): pass class MultiSigWitnessScript(WitnessScript, MultiSigScript): pass class TimelockScript(Script): @classmethod def from_time(cls, locktime: Optional[Locktime] = None, sequence: Optional[Sequence] = None) -> List[bytes]: if locktime is not None: return [ encode_minimal_num(locktime), OP_CHECKLOCKTIMEVERIFY, OP_DROP ] elif sequence is not None: return [ encode_minimal_num(sequence), OP_CHECKSEQUENCEVERIFY, OP_DROP ] else: raise ValueError('locktime or sequence required') class SingleSigTimelockScript(TimelockScript): @classmethod def from_pubkey_time( cls, sec: bytes, locktime: Optional[Locktime] = None, sequence: Optional[Sequence] = None) -> SingleSigTimelockScript: script = cls.from_time(locktime, sequence) + [sec, OP_CHECKSIG] return cls(script) class SingleSigTimelockRedeemScript(RedeemScript, SingleSigTimelockScript): pass class SingleSigTimelockWitnessScript(WitnessScript, SingleSigTimelockScript): pass class MultiSigTimelockScript(TimelockScript, MultiSigScript): @classmethod def from_pubkeys_time( cls, k: int, sec_pubkeys: List[bytes], locktime: Optional[Locktime] = None, sequence: Optional[Sequence] = None) -> MultiSigTimelockScript: script = cls.from_time(locktime, sequence) + cls.from_pubkeys( k, sec_pubkeys) return cls(script) class MultiSigTimelockRedeemScript(RedeemScript, MultiSigTimelockScript): pass class MultiSigTimelockWitnessScript(WitnessScript, MultiSigTimelockScript): pass
Source: jimmysong/minipy, script.py (Python, 13,382 bytes, github-code)
vertices = []
arestas = []
matriz = []


class Grafo:
    def __init__(self, no, noAux, prioridade):
        self.no = no
        self.noAux = noAux
        self.prioridade = prioridade


# read the edge list (node, node, weight) from the input file
grafo = open('arquivomatriz.txt', 'r')
for i in grafo:
    linha = i.split()
    arestas.append(Grafo(int(linha[0]), int(linha[1]), int(linha[2])))
grafo.close()


def Inserir(vector):
    # returns True if the vertex is already in the vertex list
    inserido = False
    for i in range(len(vertices)):
        if (vector == vertices[i]):
            inserido = True
            break
    return inserido


for i in range(len(arestas)):
    if (not Inserir(arestas[i].no)):
        vertices.append(arestas[i].no)
    if (not Inserir(arestas[i].noAux)):
        vertices.append(arestas[i].noAux)

vertices = sorted(vertices)

for i in range(len(vertices)):  # fill the matrix with 0's
    linha = []
    for j in range(len(vertices)):
        linha.append(0)
    matriz.append(linha)

for i in range(len(arestas)):  # adjacency matrix
    matriz[arestas[i].no][arestas[i].noAux] = arestas[i].prioridade
    matriz[arestas[i].noAux][arestas[i].no] = arestas[i].prioridade

print()
print("Adjacency matrix: ")
for i in range(len(matriz)):
    print(matriz[i])

print()
print("The degree of each vertex is: ")
for i in range(len(matriz)):
    g = 0
    for j in range(len(matriz[i])):
        if (matriz[i][j] != 0):
            g += 1
    print('degree of {}: {}'.format(i, g))
Source: gustavoadl06/Gustavo, 6.py (Python, 1,535 bytes, github-code)
import matplotlib.pyplot as plt
# from mpl_toolkits.axes_grid1 import ImageGrid
# import numpy as np
from os import listdir
from os import chdir
from os import path
from PIL import Image
# import matplotlib.gridspec as gridspec
import argparse

parser = argparse.ArgumentParser(description="generate plot for report")
parser.add_argument("--input_dir", required=True, help="Input ROS bag.")
parser.add_argument("--rows", required=True, help="number of rows in figure")
parser.add_argument("--cols", required=True, help="number of columns in figure")
args = parser.parse_args()

# chdir('/Volumes/macOS Big Sur/Users/pmvanderburg/matplotlib_test/')
chdir(args.input_dir)
files = listdir(args.input_dir)
files.sort()

for i, f in enumerate(files):
    if f != '.DS_Store':
        print(i, f)
    else:
        del files[i]

images = [Image.open(f) for f in files]
print(len(images))

max_rows = 7
max_cols = 3
# max_rows = 3
# max_cols = 2

methods = ['Input image',
           '640x480 N+FT',
           '832x256 K+FT',
           '640x480 N',
           '832x256 N',
           '640x480 K',
           '832x256 K']

fig, axes = plt.subplots(nrows=7, ncols=3, figsize=(9, 10), sharex=True, sharey=True)

for idx, image in enumerate(images):
    # print(files[idx])
    print(idx)
    row = idx % max_rows
    col = idx // max_rows
    print(row, ' row')
    print(col, ' col')
    # if col>0:
    #     axes[row, col].axis("off")
    axes[row, col].spines['bottom'].set_color('#ffffff')
    axes[row, col].spines['top'].set_color('#ffffff')
    axes[row, col].spines['right'].set_color('#ffffff')
    axes[row, col].spines['left'].set_color('#ffffff')
    if image.size == (1280, 720):
        image = image.resize((640, 480))
    axes[row, col].imshow(image, cmap="gray", aspect="auto")
    axes[row, 0].set_ylabel(methods[row])

plt.subplots_adjust(wspace=.05, hspace=.05)
plt.xticks([])
plt.yticks([])
# fig.savefig(path.join)
plt.show()
Source: ThijsvdBurg/Husky_scripts, data_visualization/plot scripts/plot_results.py (Python, 1,911 bytes, github-code)
# coding: utf-8 # In[1]: from pandas import DataFrame, read_csv import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer from sklearn import metrics from sklearn.cluster import KMeans, MiniBatchKMeans import sys from time import time import numpy as np # In[2]: df = pd.read_csv('lyrics.csv') df.head(10) # In[3]: df['lyrics'].replace('', np.nan, inplace=True) df.dropna(subset=['lyrics'], inplace=True) ind_drop = df[df['genre'].apply(lambda x: x.startswith('Other'))].index df = df.drop(ind_drop) # In[4]: ind_drop = df[df['genre'].apply(lambda x: x.startswith('Not Available'))].index df = df.drop(ind_drop) # In[5]: ind_drop = df[df['lyrics'].apply(lambda x: x.startswith('INSTRUMENTAL'))].index df = df.drop(ind_drop) df.drop(columns=['index']) ind_drop = df[df['lyrics'].apply(lambda x: x.startswith('instrumental'))].index df = df.drop(ind_drop) df.drop(columns=['index']) # In[6]: genre=df['genre'].values lyrics=df['lyrics'].values true_k = len(np.unique(genre)) print(np.unique(genre), "The total number of genres are", true_k) #shaping: lyrics = np.array(lyrics)[:,None] print(lyrics.shape) genre = np.array(genre)[:,None] print(genre.shape) # In[7]: data = np.append(lyrics,genre,axis=1) data.shape print(data) # In[8]: np.random.shuffle(data) data_test = data[10001:20001,] data = data[:10000,] # In[9]: data_lyrics=data[:,0] data_genre=data[:,1] data_lyrics_test = data_test[:,0] data_genre_test = data_test[:,1] # print(data_lyrics) # print(data_genre.shape) # In[10]: vectorizer = TfidfVectorizer( max_df=0.75, # max doc freq (as a fraction) of any word to include in the vocabulary min_df=0.3, # min doc freq (as doc counts) of any word to include in the vocabulary max_features=10000, # max number of words in the vocabulary stop_words='english', # remove English stopwords use_idf=True ) # In[11]: labels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6, 'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10} print("Extracting features from the training dataset using a sparse vectorizer") t0 = time() vectorizer.fit(data_lyrics) X = vectorizer.transform(data_lyrics) Y = [labels[i] for i in data_genre] X_test = vectorizer.transform(data_lyrics_test) Y_test = [labels[i] for i in data_genre_test] n_features = X.shape[1] print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X.shape) # In[12]: doc_ind = 1 # Index of an example document xi = X[doc_ind,:].todense() term_ind = xi.argsort()[:, ::-1] xi_sort = xi[0,term_ind] terms = vectorizer.get_feature_names() for i in range(n_features): term = terms[term_ind[0,i]] tfidf = xi[0,term_ind[0,i]] print('{0:20s} {1:f} '.format(term, tfidf)) # In[13]: km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1, verbose=True) # In[14]: print("Clustering sparse data with %s" % km) t0 = time() km.fit(X) print("done in %0.3fs" % (time() - t0)) print() # In[15]: order_centroids = km.cluster_centers_.argsort()[:, ::-1] for i in range(true_k): print("Cluster %d:" % i, end='') for ind in order_centroids[i, :10]: print(' %s' % terms[ind], end='') print() # In[16]: labels={'Country':1, 'Electronic':2, 'Folk':3, 'Hip-Hop':4, 'Indie':5, 'Jazz':6, 'Metal':7, 'Pop':8, 'R&B':9, 'Rock':10} print(labels.values) # 
genre_names # data_genre genre_labels=[] #print(genre_labels.shape) for j,i in enumerate(data_genre): x=labels[i] #print(x) np.append(genre_labels,x) genre_labels.append(x) #print(genre_labels) # In[17]: print((Y_test == km.predict(X_test)).sum() / len(Y_test)) # In[18]: labelkm = km.labels_ print(labelkm.shape) print(type(labelkm)) # In[19]: #print(data_genre) labelkm = km.labels_ from sklearn.metrics import confusion_matrix C = confusion_matrix(genre_labels,labelkm) Csum = np.sum(C,axis=0) Cnorm = C / Csum[None,:] print(Cnorm) print(np.array_str(C, precision=3, suppress_small=True)) plt.imshow(C, interpolation='none') plt.colorbar()
Source: TejaishwaryaGagadam/music_genre_predictor, K_Means_Clustering.py (Python, 4,472 bytes, github-code)
import unittest

import requests_mock

from alertaclient.api import Client


class PermissionTestCase(unittest.TestCase):

    def setUp(self):
        self.client = Client()

        self.perm = """
            {
              "id": "584f38f4-b44e-4d87-9b61-c106d21bcc7a",
              "permission": {
                "href": "http://localhost:8080/perm/584f38f4-b44e-4d87-9b61-c106d21bcc7a",
                "id": "584f38f4-b44e-4d87-9b61-c106d21bcc7a",
                "match": "websys",
                "scopes": [
                  "admin:users",
                  "admin:keys",
                  "write"
                ]
              },
              "status": "ok"
            }
        """

    @requests_mock.mock()
    def test_permission(self, m):
        m.post('http://localhost:8080/perm', text=self.perm)
        perm = self.client.create_perm(role='websys', scopes=['admin:users', 'admin:keys', 'write'])
        self.assertEqual(perm.match, 'websys')
        self.assertEqual(sorted(perm.scopes), sorted(['admin:users', 'admin:keys', 'write']))
Source: alerta/python-alerta-client, tests/unit/test_permissions.py (Python, 1,065 bytes, github-code)