observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\"\"\"","_____no_output_____"],["# Erstelle Pie Chart.\nimport pygal \npie_chart = pygal.Pie()\npie_chart.title = 'Dateitypen'\nfor einzelneDateitypen in datatype:\n index= datatype.index(einzelneDateitypen)\n anzahl=frequency[index]\n pie_chart.add(einzelneDateitypen, anzahl)\ndisplay(HTML(base_html.format(rendered_chart=pie_chart.render(is_unicode=True))))","_____no_output_____"]]],"string":"[\n [\n [\n \"# Welche Dateitypen gibt es?\",\n \"_____no_output_____\"\n ],\n [\n \"## 1. Verbindung zur Datenbank\\nEs wird eine Verbindung zur Neo4j-Datenbank aufgebaut.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import py2neo\\n\\ngraph = py2neo.Graph(bolt=True, host='localhost', user='neo4j', password='neo4j')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 2. Cypher-Abfrage\\nEs wird eine Abfrage an die Datenbank gestellt. Das Ergebnis wird in einem Dataframe (pandas) gespeichert.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\\n\\nquery =\\\"MATCH (f:Git:File) RETURN f.relativePath as relativePath\\\"\\ndf = pd.DataFrame(graph.run(query).data())\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 3. Datenaufbereitung\\nZur Kontrolle werden die ersten fünf Zeilen des Ergebnisses der Abfrage als Tabelle ausgegeben.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df.head()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Der folgende Codeabschnitt extrahiert die verschiedenen Dateitypen entsprechend der Dateiendung und zählt deren Häufigkeit. Die Dateitypen werden in der Variable datatype und deren Häufigkeit in der Variable frequency gespeichert.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Extrahiere Dateitypen aus Spalte des Dataframes.\\ndatatypes = df['relativePath'].str.rsplit('.', 1).str[1]\\n\\n# Zähle die Anzahl der Dateitypen und bilde diese in einem Series-Objekt ab.\\nseries = datatypes.value_counts()\\n\\n# Erzeuge zwei Listen aus dem Series-Objekt.\\ndatatype = list(series.index)\\nfrequency = list(series)\\n\\n# Erzeuge die Kategorie \\\"andere\\\", in der alle Dateitypen gesammelt werden, die weniger oder genau 20 mal auftauchen.\\nandere = 0\\nfor wert in frequency[:]:\\n index = frequency.index(wert)\\n if wert <= 20:\\n andere += wert\\n datatype.remove(datatype[index])\\n frequency.remove(wert)\\nfrequency.append(andere)\\ndatatype.append(\\\"andere\\\")\\n\\nprint(frequency)\\nprint(datatype)\\n\",\n \"[1383, 80, 41, 36, 21, 126]\\n['java', 'html', 'class', 'gif', 'txt', 'andere']\\n\"\n ]\n ],\n [\n [\n \"## 4. Visualisierung\\nDie Daten werden mittels eines Pie Charts visualisiert.\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from IPython.display import display, HTML\\n\\nbase_html = \\\"\\\"\\\"\\n\\n\\n \\n \\n \\n \\n \\n
\\n {rendered_chart}\\n
\\n \\n\\n\\\"\\\"\\\"\",\n \"_____no_output_____\"\n ],\n [\n \"# Erstelle Pie Chart.\\nimport pygal \\npie_chart = pygal.Pie()\\npie_chart.title = 'Dateitypen'\\nfor einzelneDateitypen in datatype:\\n index= datatype.index(einzelneDateitypen)\\n anzahl=frequency[index]\\n pie_chart.add(einzelneDateitypen, anzahl)\\ndisplay(HTML(base_html.format(rendered_chart=pie_chart.render(is_unicode=True))))\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459029,"cells":{"hexsha":{"kind":"string","value":"e7ee31db980ecec51c9383f5abfbcf5416a968a8"},"size":{"kind":"number","value":16568,"string":"16,568"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Basics.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Joe-mabus/Thanksgiving_Dinner"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2a61911059ce95b609736eb5e07443b4a4bf08b3"},"max_stars_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Basics.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Joe-mabus/Thanksgiving_Dinner"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2a61911059ce95b609736eb5e07443b4a4bf08b3"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Basics.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Joe-mabus/Thanksgiving_Dinner"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2a61911059ce95b609736eb5e07443b4a4bf08b3"},"max_forks_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":80.038647343,"string":"80.038647"},"max_line_length":{"kind":"number","value":1702,"string":"1,702"},"alphanum_fraction":{"kind":"number","value":0.6644736842,"string":"0.664474"},"cells":{"kind":"list like","value":[[["import pandas as pd \n\ndata = pd.read_csv(\"thanksgiving.csv\", encoding=\"Latin-1\")\n\n# print(data.head())\n\n# print(data.columns)\n\nPositive_survey_counts = data['Do you celebrate Thanksgiving?'].value_counts()\n\nPositive_surveys = data[data['Do you celebrate Thanksgiving?'] == 'Yes']\n\nMain_dish_counts = data['What is typically the main dish at your Thanksgiving 
dinner?'].value_counts()\n\nPositive_Tofurkey = data[data['What is typically the main dish at your Thanksgiving dinner?'] == 'Tofurkey']\n\nGravy_on_Tofurkey = Positive_Tofurkey['Do you typically have gravy?'].value_counts()\n\napple_isnull = data['Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - Apple'].isnull()\npumpkin_isnull = data['Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - Pumpkin'].isnull()\npecan_isnull = data['Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - Pecan'].isnull()\n\nno_pies = apple_isnull & pumpkin_isnull & pecan_isnull\n\ndef string_to_income(string):\n if pd.isnull(string):\n return None\n string = string.split(' ')[0]\n if string == 'Prefer':\n return None\n else:\n string = string.split(' ')[0]\n string = string.replace('$','')\n string = string.replace(',','')\n return int(string)\n\ndata['int_income'] = data['How much total combined money did all members of your HOUSEHOLD earn last year?'].apply(string_to_income)\n \n# print(data['int_income'].describe())\n\ndef string_to_age(string):\n if pd.isnull(string):\n return None\n else: \n string = string.split(' ')[0]\n string = string.replace('+',\"\")\n return int(string)\n \ndata['int_age'] = data['Age'].apply(string_to_age)\n\n# print(data['int_age'].value_counts())\n# print(data['int_age'].describe())","_____no_output_____"],["low_income = data['int_income'] < 150000\nhigh_income = data['int_income'] > 150000\n\nlow_travel_results = data[low_income]['How far will you travel for Thanksgiving?'].value_counts(normalize = True)\nhigh_travel_results = data[high_income]['How far will you travel for Thanksgiving?'].value_counts(normalize = True) \n \nprint(low_travel_results)\nprint(high_travel_results)","_____no_output_____"],["So according to the results, there is a higher percentage of people who will \nhave Thanksgiving at home who make more that 150000.\n\nThere is a higher percentage of people who will travel for Thanksgiving in the \nless that 150000 bracket.","_____no_output_____"],["Friendsgiving1 = data.pivot_table(\n index = \"Have you ever tried to meet up with hometown friends on Thanksgiving night?\", \n columns = 'Have you ever attended a \"Friendsgiving?\"', \n values = \"int_age\")\n\nFriendsgiving2 = data.pivot_table(\n index = \"Have you ever tried to meet up with hometown friends on Thanksgiving night?\", \n columns = 'Have you ever attended a \"Friendsgiving?\"', \n values = \"int_income\")\nprint(Friendsgiving1)\nprint(Friendsgiving2)","Have you ever attended a \"Friendsgiving?\" No Yes\nHave you ever tried to meet up with hometown fr... \nNo 42.283702 37.010526\nYes 41.475410 33.976744\nHave you ever attended a \"Friendsgiving?\" No Yes\nHave you ever tried to meet up with hometown fr... \nNo 78914.549654 72894.736842\nYes 78750.000000 66019.736842\n"],["Looks like the younger and poorer you are, the more likely you are to attend a \nfriendsgiving. 
","_____no_output_____"]]],"string":"[\n [\n [\n \"import pandas as pd \\n\\ndata = pd.read_csv(\\\"thanksgiving.csv\\\", encoding=\\\"Latin-1\\\")\\n\\n# print(data.head())\\n\\n# print(data.columns)\\n\\nPositive_survey_counts = data['Do you celebrate Thanksgiving?'].value_counts()\\n\\nPositive_surveys = data[data['Do you celebrate Thanksgiving?'] == 'Yes']\\n\\nMain_dish_counts = data['What is typically the main dish at your Thanksgiving dinner?'].value_counts()\\n\\nPositive_Tofurkey = data[data['What is typically the main dish at your Thanksgiving dinner?'] == 'Tofurkey']\\n\\nGravy_on_Tofurkey = Positive_Tofurkey['Do you typically have gravy?'].value_counts()\\n\\napple_isnull = data['Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - Apple'].isnull()\\npumpkin_isnull = data['Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - Pumpkin'].isnull()\\npecan_isnull = data['Which type of pie is typically served at your Thanksgiving dinner? Please select all that apply. - Pecan'].isnull()\\n\\nno_pies = apple_isnull & pumpkin_isnull & pecan_isnull\\n\\ndef string_to_income(string):\\n if pd.isnull(string):\\n return None\\n string = string.split(' ')[0]\\n if string == 'Prefer':\\n return None\\n else:\\n string = string.split(' ')[0]\\n string = string.replace('$','')\\n string = string.replace(',','')\\n return int(string)\\n\\ndata['int_income'] = data['How much total combined money did all members of your HOUSEHOLD earn last year?'].apply(string_to_income)\\n \\n# print(data['int_income'].describe())\\n\\ndef string_to_age(string):\\n if pd.isnull(string):\\n return None\\n else: \\n string = string.split(' ')[0]\\n string = string.replace('+',\\\"\\\")\\n return int(string)\\n \\ndata['int_age'] = data['Age'].apply(string_to_age)\\n\\n# print(data['int_age'].value_counts())\\n# print(data['int_age'].describe())\",\n \"_____no_output_____\"\n ],\n [\n \"low_income = data['int_income'] < 150000\\nhigh_income = data['int_income'] > 150000\\n\\nlow_travel_results = data[low_income]['How far will you travel for Thanksgiving?'].value_counts(normalize = True)\\nhigh_travel_results = data[high_income]['How far will you travel for Thanksgiving?'].value_counts(normalize = True) \\n \\nprint(low_travel_results)\\nprint(high_travel_results)\",\n \"_____no_output_____\"\n ],\n [\n \"So according to the results, there is a higher percentage of people who will \\nhave Thanksgiving at home who make more that 150000.\\n\\nThere is a higher percentage of people who will travel for Thanksgiving in the \\nless that 150000 bracket.\",\n \"_____no_output_____\"\n ],\n [\n \"Friendsgiving1 = data.pivot_table(\\n index = \\\"Have you ever tried to meet up with hometown friends on Thanksgiving night?\\\", \\n columns = 'Have you ever attended a \\\"Friendsgiving?\\\"', \\n values = \\\"int_age\\\")\\n\\nFriendsgiving2 = data.pivot_table(\\n index = \\\"Have you ever tried to meet up with hometown friends on Thanksgiving night?\\\", \\n columns = 'Have you ever attended a \\\"Friendsgiving?\\\"', \\n values = \\\"int_income\\\")\\nprint(Friendsgiving1)\\nprint(Friendsgiving2)\",\n \"Have you ever attended a \\\"Friendsgiving?\\\" No Yes\\nHave you ever tried to meet up with hometown fr... \\nNo 42.283702 37.010526\\nYes 41.475410 33.976744\\nHave you ever attended a \\\"Friendsgiving?\\\" No Yes\\nHave you ever tried to meet up with hometown fr... 
\\nNo 78914.549654 72894.736842\\nYes 78750.000000 66019.736842\\n\"\n ],\n [\n \"Looks like the younger and poorer you are, the more likely you are to attend a \\nfriendsgiving. \",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459030,"cells":{"hexsha":{"kind":"string","value":"e7ee3885c95ea5cb8ba88b4937458ee64157d1a5"},"size":{"kind":"number","value":107979,"string":"107,979"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"ionized/galactic_plane_continuum_21cm.ipynb"},"max_stars_repo_name":{"kind":"string","value":"CambridgeUniversityPress/IntroductionInterstellarMedium"},"max_stars_repo_head_hexsha":{"kind":"string","value":"fbfe64c7d50d15da93ebf2fbc7d86d83cbf8941a"},"max_stars_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-04-26T15:37:13.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-05-13T04:42:15.000Z"},"max_issues_repo_path":{"kind":"string","value":"ionized/galactic_plane_continuum_21cm.ipynb"},"max_issues_repo_name":{"kind":"string","value":"interstellarmedium/interstellarmedium.github.io"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0440a5bd80052ab87575e70fc39acd4bf8e225b3"},"max_issues_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ionized/galactic_plane_continuum_21cm.ipynb"},"max_forks_repo_name":{"kind":"string","value":"interstellarmedium/interstellarmedium.github.io"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0440a5bd80052ab87575e70fc39acd4bf8e225b3"},"max_forks_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":749.8541666667,"string":"749.854167"},"max_line_length":{"kind":"number","value":104026,"string":"104,026"},"alphanum_fraction":{"kind":"number","value":0.9374044953,"string":"0.937404"},"cells":{"kind":"list like","value":[[["## Introduction to the Interstellar Medium\n### Jonathan Williams","_____no_output_____"],["### Figure 6.3: portion of the Galactic plane in 21cm continuum showing bremsstrahlung and synchrotron sources","_____no_output_____"]],[["import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy.visualization import (ImageNormalize, SqrtStretch, LogStretch, AsinhStretch)\n%matplotlib inline","_____no_output_____"],["fig = plt.figure(figsize=(14,7.5))\n\nhdu = fits.open('g330to340.i.fits')\nwcs1 = WCS(hdu[0])\nax1 = fig.add_subplot(111, projection=wcs1)\nim1 = hdu[0].data\nhd1 = hdu[0].header\nhdu.close()\n#print(hd1)\n\nimin, imax = 380, 730\nimcrop = im1[:, imin:imax]\n#print(imcrop.min(),imcrop.max())\n\nnorm = ImageNormalize(imcrop, 
vmin=-0.15, vmax=2.0, stretch=AsinhStretch(a=0.1))\nax1.imshow(imcrop, cmap='gray', origin='lower', norm=norm) \n\nax1.set_xlim(0,350)\nax1.set_ylim(0,180)\nplt.plot([0,350], [90,90], ls='dashed', color='white', lw=2)\nax1.text(82, 45, 'HII', color='white', fontsize=18, fontweight='normal')\nax1.text(316, 97, 'SNR', color='white', fontsize=18, fontweight='normal')\n\n# scale bar\ndx = hd1['CDELT1']\n#print(dx)\n# 40'' per pixel, make bar 1 deg = 90 pix\nxbar = 90\nx0 = 250\nx1 = x0 + xbar\ny0 = 12\ndy = 2\nax1.plot([x0,x1],[y0,y0], 'w-', lw=2)\nax1.plot([x0,x0],[y0-dy,y0+dy], 'w-', lw=2)\nax1.plot([x1,x1],[y0-dy,y0+dy], 'w-', lw=2)\n\n# this crashes binder\n#mpl.rc('text', usetex=True)\n#mpl.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]\n#ax1.text(0.5*(x0+x1), y0+1.5*dy, r'$\\boldsymbol{1^\\circ}$', color='white', fontsize=24, fontweight='heavy', ha='center')\n\n# but this works ok\nax1.text(0.5*(x0+x1), y0+1.5*dy, r'$1^\\circ$', color='white', fontsize=24, fontweight='heavy', ha='center')\nax1.text(0.03,0.91,'21cm continuum', {'color': 'w', 'fontsize': 28}, transform=ax1.transAxes)\n\nfor i in (0,1):\n ax1.coords[i].set_ticks_visible(False)\n ax1.coords[i].set_ticklabel_visible(False)\n ax1.coords[i].set_ticks_visible(False)\n ax1.coords[i].set_ticklabel_visible(False)\n ax1.coords[i].set_axislabel('')\n ax1.coords[i].set_axislabel('')\n\nplt.tight_layout()\nplt.savefig('galactic_plane_continuum_21cm.pdf')","-0.15332128 7.7325406\n"]]],"string":"[\n [\n [\n \"## Introduction to the Interstellar Medium\\n### Jonathan Williams\",\n \"_____no_output_____\"\n ],\n [\n \"### Figure 6.3: portion of the Galactic plane in 21cm continuum showing bremsstrahlung and synchrotron sources\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\nimport matplotlib.pyplot as plt\\nimport matplotlib as mpl\\nfrom astropy.io import fits\\nfrom astropy.wcs import WCS\\nfrom astropy.visualization import (ImageNormalize, SqrtStretch, LogStretch, AsinhStretch)\\n%matplotlib inline\",\n \"_____no_output_____\"\n ],\n [\n \"fig = plt.figure(figsize=(14,7.5))\\n\\nhdu = fits.open('g330to340.i.fits')\\nwcs1 = WCS(hdu[0])\\nax1 = fig.add_subplot(111, projection=wcs1)\\nim1 = hdu[0].data\\nhd1 = hdu[0].header\\nhdu.close()\\n#print(hd1)\\n\\nimin, imax = 380, 730\\nimcrop = im1[:, imin:imax]\\n#print(imcrop.min(),imcrop.max())\\n\\nnorm = ImageNormalize(imcrop, vmin=-0.15, vmax=2.0, stretch=AsinhStretch(a=0.1))\\nax1.imshow(imcrop, cmap='gray', origin='lower', norm=norm) \\n\\nax1.set_xlim(0,350)\\nax1.set_ylim(0,180)\\nplt.plot([0,350], [90,90], ls='dashed', color='white', lw=2)\\nax1.text(82, 45, 'HII', color='white', fontsize=18, fontweight='normal')\\nax1.text(316, 97, 'SNR', color='white', fontsize=18, fontweight='normal')\\n\\n# scale bar\\ndx = hd1['CDELT1']\\n#print(dx)\\n# 40'' per pixel, make bar 1 deg = 90 pix\\nxbar = 90\\nx0 = 250\\nx1 = x0 + xbar\\ny0 = 12\\ndy = 2\\nax1.plot([x0,x1],[y0,y0], 'w-', lw=2)\\nax1.plot([x0,x0],[y0-dy,y0+dy], 'w-', lw=2)\\nax1.plot([x1,x1],[y0-dy,y0+dy], 'w-', lw=2)\\n\\n# this crashes binder\\n#mpl.rc('text', usetex=True)\\n#mpl.rcParams['text.latex.preamble']=[r\\\"\\\\usepackage{amsmath}\\\"]\\n#ax1.text(0.5*(x0+x1), y0+1.5*dy, r'$\\\\boldsymbol{1^\\\\circ}$', color='white', fontsize=24, fontweight='heavy', ha='center')\\n\\n# but this works ok\\nax1.text(0.5*(x0+x1), y0+1.5*dy, r'$1^\\\\circ$', color='white', fontsize=24, fontweight='heavy', ha='center')\\nax1.text(0.03,0.91,'21cm continuum', {'color': 'w', 'fontsize': 28}, 
transform=ax1.transAxes)\\n\\nfor i in (0,1):\\n ax1.coords[i].set_ticks_visible(False)\\n ax1.coords[i].set_ticklabel_visible(False)\\n ax1.coords[i].set_ticks_visible(False)\\n ax1.coords[i].set_ticklabel_visible(False)\\n ax1.coords[i].set_axislabel('')\\n ax1.coords[i].set_axislabel('')\\n\\nplt.tight_layout()\\nplt.savefig('galactic_plane_continuum_21cm.pdf')\",\n \"-0.15332128 7.7325406\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code","code"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459031,"cells":{"hexsha":{"kind":"string","value":"e7ee40f4459a491f4eca8b134f95189cab0c016b"},"size":{"kind":"number","value":39369,"string":"39,369"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"tv-script-generation/dlnd_tv_script_generation.ipynb"},"max_stars_repo_name":{"kind":"string","value":"duozhanggithub/Deep-Learning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"31dc8ee2dc23b8a5f15337430b8a2c2f84b1749d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-05-08T20:15:46.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-05-08T20:15:46.000Z"},"max_issues_repo_path":{"kind":"string","value":"tv-script-generation/dlnd_tv_script_generation.ipynb"},"max_issues_repo_name":{"kind":"string","value":"duozhanggithub/Deep-Learning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"31dc8ee2dc23b8a5f15337430b8a2c2f84b1749d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"tv-script-generation/dlnd_tv_script_generation.ipynb"},"max_forks_repo_name":{"kind":"string","value":"duozhanggithub/Deep-Learning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"31dc8ee2dc23b8a5f15337430b8a2c2f84b1749d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":33.1388888889,"string":"33.138889"},"max_line_length":{"kind":"number","value":556,"string":"556"},"alphanum_fraction":{"kind":"number","value":0.5701186213,"string":"0.570119"},"cells":{"kind":"list like","value":[[["# TV Script Generation\nIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).\n## Get the Data\nThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. 
This doesn't include other versions of the tavern, like \"Moe's Cavern\", \"Flaming Moe's\", \"Uncle Moe's Family Feed-Bag\", etc..","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\n\ndata_dir = './data/simpsons/moes_tavern_lines.txt'\ntext = helper.load_data(data_dir)\n# Ignore notice, since we don't use it for analysing the data\ntext = text[81:]","_____no_output_____"]],[["## Explore the Data\nPlay around with `view_sentence_range` to view different parts of the data.","_____no_output_____"]],[["view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\nscenes = text.split('\\n\\n')\nprint('Number of scenes: {}'.format(len(scenes)))\nsentence_count_scene = [scene.count('\\n') for scene in scenes]\nprint('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\n\nsentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\nprint('Number of lines: {}'.format(len(sentences)))\nword_count_sentence = [len(sentence.split()) for sentence in sentences]\nprint('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\n\nprint()\nprint('The sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))","Dataset Stats\nRoughly the number of unique words: 11492\nNumber of scenes: 262\nAverage number of sentences in each scene: 15.251908396946565\nNumber of lines: 4258\nAverage number of words in each line: 11.50164396430249\n\nThe sentences 0 to 10:\n\nMoe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.\nBart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.\nMoe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?\nMoe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.\nMoe_Szyslak: What's the matter Homer? You're not your normal effervescent self.\nHomer_Simpson: I got my problems, Moe. Give me another one.\nMoe_Szyslak: Homer, hey, you should not drink to forget your problems.\nBarney_Gumble: Yeah, you should only drink to enhance your social skills.\n\n"]],[["## Implement Preprocessing Functions\nThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:\n- Lookup Table\n- Tokenize Punctuation\n\n### Lookup Table\nTo create a word embedding, you first need to transform the words to ids. 
In this function, create two dictionaries:\n- Dictionary to go from the words to an id, we'll call `vocab_to_int`\n- Dictionary to go from the id to word, we'll call `int_to_vocab`\n\nReturn these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`","_____no_output_____"]],[["import numpy as np\nimport problem_unittests as tests\n\ndef create_lookup_tables(text):\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text of tv scripts split into words\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n \"\"\"\n # TODO: Implement Function\n text = list(set(text))\n #text_id = range(len(text))\n #int_to_vocab = dict(zip(text_id, text)) \n #vocab_to_int = dict(zip(text, text_id))\n int_to_vocab = {word_i: word for word_i, word in enumerate(text)}\n vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}\n return vocab_to_int, int_to_vocab\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_create_lookup_tables(create_lookup_tables)","Tests Passed\n"]],[["### Tokenize Punctuation\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word \"bye\" and \"bye!\".\n\nImplement the function `token_lookup` to return a dict that will be used to tokenize symbols like \"!\" into \"||Exclamation_Mark||\". Create a dictionary for the following symbols where the symbol is the key and value is the token:\n- Period ( . )\n- Comma ( , )\n- Quotation Mark ( \" )\n- Semicolon ( ; )\n- Exclamation mark ( ! )\n- Question mark ( ? )\n- Left Parentheses ( ( )\n- Right Parentheses ( ) )\n- Dash ( -- )\n- Return ( \\n )\n\nThis dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token \"dash\", try using something like \"||dash||\".","_____no_output_____"]],[["def token_lookup():\n \"\"\"\n Generate a dict to turn punctuation into a token.\n :return: Tokenize dictionary where the key is the punctuation and the value is the token\n \"\"\"\n # TODO: Implement Function\n keys = ['.', ',', '\"', ';', '!', '?', '(', ')', '--','\\n']\n values = ['||Period||','||Comma||','||Quotation_Mark||','||Semicolon||','||Exclamation_mark||','||Question_mark||','||Left_Parentheses||','||Right_Parentheses||','||Dash||','||Return||']\n token_lookup = dict(zip(keys,values))\n return token_lookup\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_tokenize(token_lookup)","Tests Passed\n"]],[["## Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)","_____no_output_____"]],[["# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. 
The preprocessed data has been saved to disk.","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport numpy as np\nimport problem_unittests as tests\n\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()","_____no_output_____"]],[["## Build the Neural Network\nYou'll build the components necessary to build a RNN by implementing the following functions below:\n- get_inputs\n- get_init_cell\n- get_embed\n- build_rnn\n- build_nn\n- get_batches\n\n### Check the Version of TensorFlow and Access to GPU","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))","TensorFlow Version: 1.0.0\n"]],[["### Input\nImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n- Input text placeholder named \"input\" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.\n- Targets placeholder\n- Learning Rate placeholder\n\nReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`","_____no_output_____"]],[["def get_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, and learning rate.\n :return: Tuple (input, targets, learning rate)\n \"\"\"\n # TODO: Implement Function\n Input = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input') \n Targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets') \n LearningRate = tf.placeholder(dtype=tf.float32, name='learning_rate') \n return Input, Targets, LearningRate\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_inputs(get_inputs)","Tests Passed\n"]],[["### Build RNN Cell and Initialize\nStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).\n- The Rnn size should be set using `rnn_size`\n- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function\n - Apply the name \"initial_state\" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\n\nReturn the cell and initial state in the following tuple `(Cell, InitialState)`","_____no_output_____"]],[["def get_init_cell(batch_size, rnn_size):\n \"\"\"\n Create an RNN Cell and initialize it.\n :param batch_size: Size of batches\n :param rnn_size: Size of RNNs\n :return: Tuple (cell, initialize state)\n \"\"\"\n # TODO: Implement Function\n #rnn_layers = 2\n \n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n Cell = tf.contrib.rnn.MultiRNNCell([lstm])\n #initial_state = Cell.zero_state(batch_size=tf.placeholder(dtype=tf.int32, shape=[]), dtype=tf.float32)\n InitialState = tf.identity(Cell.zero_state(batch_size, tf.float32), name = 'initial_state')\n #InitialState = tf.identity(initial_state, name='initial_state')\n return Cell, 
InitialState\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_init_cell(get_init_cell)","Tests Passed\n"]],[["### Word Embedding\nApply embedding to `input_data` using TensorFlow. Return the embedded sequence.","_____no_output_____"]],[["def get_embed(input_data, vocab_size, embed_dim):\n \"\"\"\n Create embedding for .\n :param input_data: TF placeholder for text input.\n :param vocab_size: Number of words in vocabulary.\n :param embed_dim: Number of embedding dimensions\n :return: Embedded input.\n \"\"\"\n # TODO: Implement Function\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n return embed\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_embed(get_embed)","Tests Passed\n"]],[["### Build RNN\nYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.\n- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)\n - Apply the name \"final_state\" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\n\nReturn the outputs and final_state state in the following tuple `(Outputs, FinalState)` ","_____no_output_____"]],[["def build_rnn(cell, inputs):\n \"\"\"\n Create a RNN using a RNN Cell\n :param cell: RNN Cell\n :param inputs: Input text data\n :return: Tuple (Outputs, Final State)\n \"\"\"\n # TODO: Implement Function\n Outputs, Final_State = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n FinalState = tf.identity(Final_State, name='final_state')\n return Outputs, FinalState\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_rnn(build_rnn)","Tests Passed\n"]],[["### Build the Neural Network\nApply the functions you implemented above to:\n- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.\n- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.\n- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.\n\nReturn the logits and final state in the following tuple (Logits, FinalState) ","_____no_output_____"]],[["def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):\n \"\"\"\n Build part of the neural network\n :param cell: RNN cell\n :param rnn_size: Size of rnns\n :param input_data: Input data\n :param vocab_size: Vocabulary size\n :param embed_dim: Number of embedding dimensions\n :return: Tuple (Logits, FinalState)\n \"\"\"\n # TODO: Implement Function\n embedding = get_embed(input_data, vocab_size, embed_dim)\n Outputs, FinalState = build_rnn(cell, embedding)\n Logits = tf.contrib.layers.fully_connected(Outputs, vocab_size, activation_fn=None)\n return Logits, FinalState\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_nn(build_nn)","Tests Passed\n"]],[["### Batches\nImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. 
Each batch contains two elements:\n- The first element is a single batch of **input** with the shape `[batch size, sequence length]`\n- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`\n\nIf you can't fill the last batch with enough data, drop the last batch.\n\nFor exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:\n```\n[\n # First Batch\n [\n # Batch of Input\n [[ 1 2], [ 7 8], [13 14]]\n # Batch of targets\n [[ 2 3], [ 8 9], [14 15]]\n ]\n\n # Second Batch\n [\n # Batch of Input\n [[ 3 4], [ 9 10], [15 16]]\n # Batch of targets\n [[ 4 5], [10 11], [16 17]]\n ]\n\n # Third Batch\n [\n # Batch of Input\n [[ 5 6], [11 12], [17 18]]\n # Batch of targets\n [[ 6 7], [12 13], [18 1]]\n ]\n]\n```\n\nNotice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.","_____no_output_____"]],[["def get_batches(int_text, batch_size, seq_length):\n \"\"\"\n Return batches of input and target\n :param int_text: Text with the words replaced by their ids\n :param batch_size: The size of batch\n :param seq_length: The length of sequence\n :return: Batches as a Numpy array\n \"\"\"\n # TODO: Implement Function\n n_batches = len(int_text)//(batch_size*seq_length)\n input_batch = np.array(int_text[0: n_batches * batch_size * seq_length])\n target_batch = np.array(int_text[1: n_batches * batch_size * seq_length])\n target_batch = np.append(target_batch, int_text[0])\n \n input_batchs = np.split(input_batch.reshape(batch_size, -1), n_batches, 1)\n target_batchs = np.split(target_batch.reshape(batch_size, -1), n_batches, 1)\n \n get_batches = list(zip(input_batchs, target_batchs))\n return np.array(get_batches)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_batches(get_batches)","Tests Passed\n"]],[["## Neural Network Training\n### Hyperparameters\nTune the following parameters:\n\n- Set `num_epochs` to the number of epochs.\n- Set `batch_size` to the batch size.\n- Set `rnn_size` to the size of the RNNs.\n- Set `embed_dim` to the size of the embedding.\n- Set `seq_length` to the length of sequence.\n- Set `learning_rate` to the learning rate.\n- Set `show_every_n_batches` to the number of batches the neural network should print progress.","_____no_output_____"]],[["# Number of Epochs\nnum_epochs = 100\n# Batch Size\nbatch_size = 156\n# RNN Size\nrnn_size = 600\n# Embedding Dimension Size\nembed_dim = 500\n# Sequence Length\nseq_length = 14\n# Learning Rate\nlearning_rate = 0.001\n# Show stats for every n number of batches\nshow_every_n_batches = 100\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nsave_dir = './save'","_____no_output_____"]],[["### Build the Graph\nBuild the graph using the neural network you implemented.","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom tensorflow.contrib import seq2seq\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n vocab_size = len(int_to_vocab)\n input_text, targets, lr = get_inputs()\n input_data_shape = tf.shape(input_text)\n cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)\n logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)\n\n # Probabilities for generating words\n probs = tf.nn.softmax(logits, 
name='probs')\n\n # Loss function\n cost = seq2seq.sequence_loss(\n logits,\n targets,\n tf.ones([input_data_shape[0], input_data_shape[1]]))\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)","_____no_output_____"]],[["## Train\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forms](https://discussions.udacity.com/) to see if anyone is having the same problem.","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nbatches = get_batches(int_text, batch_size, seq_length)\n\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(num_epochs):\n state = sess.run(initial_state, {input_text: batches[0][0]})\n\n for batch_i, (x, y) in enumerate(batches):\n feed = {\n input_text: x,\n targets: y,\n initial_state: state,\n lr: learning_rate}\n train_loss, state, _ = sess.run([cost, final_state, train_op], feed)\n\n # Show every batches\n if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\n print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n epoch_i,\n batch_i,\n len(batches),\n train_loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_dir)\n print('Model Trained and Saved')","Epoch 0 Batch 0/31 train_loss = 8.825\nEpoch 3 Batch 7/31 train_loss = 5.159\nEpoch 6 Batch 14/31 train_loss = 4.528\nEpoch 9 Batch 21/31 train_loss = 4.046\nEpoch 12 Batch 28/31 train_loss = 3.626\nEpoch 16 Batch 4/31 train_loss = 3.317\nEpoch 19 Batch 11/31 train_loss = 3.031\nEpoch 22 Batch 18/31 train_loss = 2.765\nEpoch 25 Batch 25/31 train_loss = 2.474\nEpoch 29 Batch 1/31 train_loss = 2.178\nEpoch 32 Batch 8/31 train_loss = 2.101\nEpoch 35 Batch 15/31 train_loss = 1.774\nEpoch 38 Batch 22/31 train_loss = 1.655\nEpoch 41 Batch 29/31 train_loss = 1.581\nEpoch 45 Batch 5/31 train_loss = 1.388\nEpoch 48 Batch 12/31 train_loss = 1.260\nEpoch 51 Batch 19/31 train_loss = 1.038\nEpoch 54 Batch 26/31 train_loss = 1.010\nEpoch 58 Batch 2/31 train_loss = 0.891\nEpoch 61 Batch 9/31 train_loss = 0.773\nEpoch 64 Batch 16/31 train_loss = 0.718\nEpoch 67 Batch 23/31 train_loss = 0.642\nEpoch 70 Batch 30/31 train_loss = 0.591\nEpoch 74 Batch 6/31 train_loss = 0.534\nEpoch 77 Batch 13/31 train_loss = 0.482\nEpoch 80 Batch 20/31 train_loss = 0.438\nEpoch 83 Batch 27/31 train_loss = 0.359\nEpoch 87 Batch 3/31 train_loss = 0.369\nEpoch 90 Batch 10/31 train_loss = 0.338\nEpoch 93 Batch 17/31 train_loss = 0.300\nEpoch 96 Batch 24/31 train_loss = 0.291\nModel Trained and Saved\n"]],[["## Save Parameters\nSave `seq_length` and `save_dir` for generating a new TV script.","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params((seq_length, save_dir))","_____no_output_____"]],[["# Checkpoint","_____no_output_____"]],[["\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\nseq_length, load_dir = helper.load_params()","_____no_output_____"]],[["## Implement Generate Functions\n### Get Tensors\nGet tensors from `loaded_graph` using the function 
[`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:\n- \"input:0\"\n- \"initial_state:0\"\n- \"final_state:0\"\n- \"probs:0\"\n\nReturn the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)` ","_____no_output_____"]],[["def get_tensors(loaded_graph):\n \"\"\"\n Get input, initial state, final state, and probabilities tensor from \n :param loaded_graph: TensorFlow graph loaded from file\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\n \"\"\"\n # TODO: Implement Function\n with loaded_graph.as_default() as g:\n InputTensor = loaded_graph.get_tensor_by_name(\"input:0\")\n InitialStateTensor = loaded_graph.get_tensor_by_name(\"initial_state:0\")\n FinalStateTensor = loaded_graph.get_tensor_by_name(\"final_state:0\")\n ProbsTensor = loaded_graph.get_tensor_by_name(\"probs:0\")\n return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_tensors(get_tensors)","Tests Passed\n"]],[["### Choose Word\nImplement the `pick_word()` function to select the next word using `probabilities`.","_____no_output_____"]],[["def pick_word(probabilities, int_to_vocab):\n \"\"\"\n Pick the next word in the generated text\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\n :return: String of the predicted word\n \"\"\"\n # TODO: Implement Function\n pick_word = np.random.choice(len(int_to_vocab), 1, p=probabilities)[0]\n pick_word = int_to_vocab.get(pick_word)\n return pick_word\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_pick_word(pick_word)","Tests Passed\n"]],[["## Generate TV Script\nThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.","_____no_output_____"]],[["gen_length = 200\n# homer_simpson, moe_szyslak, or Barney_Gumble\nprime_word = 'moe_szyslak'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n input_text, initial_state, final_state, probs = get_tensors(loaded_graph)\n\n # Sentences generation setup\n gen_sentences = [prime_word + ':']\n prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\n\n # Generate sentences\n for n in range(gen_length):\n # Dynamic Input\n dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n dyn_seq_length = len(dyn_input[0])\n\n # Get Prediction\n probabilities, prev_state = sess.run(\n [probs, final_state],\n {input_text: dyn_input, initial_state: prev_state})\n \n pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)\n\n gen_sentences.append(pred_word)\n \n # Remove tokens\n tv_script = ' '.join(gen_sentences)\n for key, token in token_dict.items():\n ending = ' ' if key in ['\\n', '(', '\"'] else ''\n tv_script = tv_script.replace(' ' + token.lower(), key)\n tv_script = tv_script.replace('\\n ', '\\n')\n tv_script = tv_script.replace('( ', '(')\n \n print(tv_script)","moe_szyslak: ah-ha, big mistake pal! 
hey moe, can you be the best book on you could never!\nhomer_simpson:(getting idea) but you're dea-d-d-dead.(three stooges scared sound)\ngrampa_simpson:(upbeat) i guess despite all sweet music, but then we pour it a beer at half something.\n\n\nlenny_leonard: hey, homer. r.\nhomer_simpson: moe, it's called!\nmoe_szyslak: guys, i'm gonna let him want to go to my dad\n\n\nhomer_simpson:(to moe) thirty cases of cough syrup. sign in the way.\nbarney_gumble: yeah, that's probably what i look at you, i'm too?\nmoe_szyslak: oh, here. the audience is still love over.\nmoe's_thoughts: this is kent brockman. and it begins,\" dear is to that!\nmoe_szyslak:(laughs) if you want to be back.\nvoice: excuse me, so you can either sit here in the back of my cruiser.\nhomer_simpson: well if i only got their secrets.\nlenny_leonard:(amiable) amanda\n"]],[["# The TV Script is Nonsensical\nIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckly there's more data! As we mentioned in the begging of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.\n# Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_tv_script_generation.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.","_____no_output_____"]]],"string":"[\n [\n [\n \"# TV Script Generation\\nIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).\\n## Get the Data\\nThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. 
This doesn't include other versions of the tavern, like \\\"Moe's Cavern\\\", \\\"Flaming Moe's\\\", \\\"Uncle Moe's Family Feed-Bag\\\", etc..\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nimport helper\\n\\ndata_dir = './data/simpsons/moes_tavern_lines.txt'\\ntext = helper.load_data(data_dir)\\n# Ignore notice, since we don't use it for analysing the data\\ntext = text[81:]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Explore the Data\\nPlay around with `view_sentence_range` to view different parts of the data.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"view_sentence_range = (0, 10)\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nimport numpy as np\\n\\nprint('Dataset Stats')\\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\\nscenes = text.split('\\\\n\\\\n')\\nprint('Number of scenes: {}'.format(len(scenes)))\\nsentence_count_scene = [scene.count('\\\\n') for scene in scenes]\\nprint('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\\n\\nsentences = [sentence for scene in scenes for sentence in scene.split('\\\\n')]\\nprint('Number of lines: {}'.format(len(sentences)))\\nword_count_sentence = [len(sentence.split()) for sentence in sentences]\\nprint('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\\n\\nprint()\\nprint('The sentences {} to {}:'.format(*view_sentence_range))\\nprint('\\\\n'.join(text.split('\\\\n')[view_sentence_range[0]:view_sentence_range[1]]))\",\n \"Dataset Stats\\nRoughly the number of unique words: 11492\\nNumber of scenes: 262\\nAverage number of sentences in each scene: 15.251908396946565\\nNumber of lines: 4258\\nAverage number of words in each line: 11.50164396430249\\n\\nThe sentences 0 to 10:\\n\\nMoe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.\\nBart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.\\nMoe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?\\nMoe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.\\nMoe_Szyslak: What's the matter Homer? You're not your normal effervescent self.\\nHomer_Simpson: I got my problems, Moe. Give me another one.\\nMoe_Szyslak: Homer, hey, you should not drink to forget your problems.\\nBarney_Gumble: Yeah, you should only drink to enhance your social skills.\\n\\n\"\n ]\n ],\n [\n [\n \"## Implement Preprocessing Functions\\nThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:\\n- Lookup Table\\n- Tokenize Punctuation\\n\\n### Lookup Table\\nTo create a word embedding, you first need to transform the words to ids. 
In this function, create two dictionaries:\\n- Dictionary to go from the words to an id, we'll call `vocab_to_int`\\n- Dictionary to go from the id to word, we'll call `int_to_vocab`\\n\\nReturn these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\nimport problem_unittests as tests\\n\\ndef create_lookup_tables(text):\\n \\\"\\\"\\\"\\n Create lookup tables for vocabulary\\n :param text: The text of tv scripts split into words\\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n text = list(set(text))\\n #text_id = range(len(text))\\n #int_to_vocab = dict(zip(text_id, text)) \\n #vocab_to_int = dict(zip(text, text_id))\\n int_to_vocab = {word_i: word for word_i, word in enumerate(text)}\\n vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}\\n return vocab_to_int, int_to_vocab\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_create_lookup_tables(create_lookup_tables)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Tokenize Punctuation\\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word \\\"bye\\\" and \\\"bye!\\\".\\n\\nImplement the function `token_lookup` to return a dict that will be used to tokenize symbols like \\\"!\\\" into \\\"||Exclamation_Mark||\\\". Create a dictionary for the following symbols where the symbol is the key and value is the token:\\n- Period ( . )\\n- Comma ( , )\\n- Quotation Mark ( \\\" )\\n- Semicolon ( ; )\\n- Exclamation mark ( ! )\\n- Question mark ( ? )\\n- Left Parentheses ( ( )\\n- Right Parentheses ( ) )\\n- Dash ( -- )\\n- Return ( \\\\n )\\n\\nThis dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token \\\"dash\\\", try using something like \\\"||dash||\\\".\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def token_lookup():\\n \\\"\\\"\\\"\\n Generate a dict to turn punctuation into a token.\\n :return: Tokenize dictionary where the key is the punctuation and the value is the token\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n keys = ['.', ',', '\\\"', ';', '!', '?', '(', ')', '--','\\\\n']\\n values = ['||Period||','||Comma||','||Quotation_Mark||','||Semicolon||','||Exclamation_mark||','||Question_mark||','||Left_Parentheses||','||Right_Parentheses||','||Dash||','||Return||']\\n token_lookup = dict(zip(keys,values))\\n return token_lookup\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_tokenize(token_lookup)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"## Preprocess all the data and save it\\nRunning the code cell below will preprocess all the data and save it to file.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\n# Preprocess Training, Validation, and Testing Data\\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Check Point\\nThis is your first checkpoint. 
If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nimport helper\\nimport numpy as np\\nimport problem_unittests as tests\\n\\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Build the Neural Network\\nYou'll build the components necessary to build a RNN by implementing the following functions below:\\n- get_inputs\\n- get_init_cell\\n- get_embed\\n- build_rnn\\n- build_nn\\n- get_batches\\n\\n### Check the Version of TensorFlow and Access to GPU\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nfrom distutils.version import LooseVersion\\nimport warnings\\nimport tensorflow as tf\\n\\n# Check TensorFlow Version\\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'\\nprint('TensorFlow Version: {}'.format(tf.__version__))\\n\\n# Check for a GPU\\nif not tf.test.gpu_device_name():\\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\\nelse:\\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\",\n \"TensorFlow Version: 1.0.0\\n\"\n ]\n ],\n [\n [\n \"### Input\\nImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\\n- Input text placeholder named \\\"input\\\" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.\\n- Targets placeholder\\n- Learning Rate placeholder\\n\\nReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def get_inputs():\\n \\\"\\\"\\\"\\n Create TF Placeholders for input, targets, and learning rate.\\n :return: Tuple (input, targets, learning rate)\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n Input = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input') \\n Targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets') \\n LearningRate = tf.placeholder(dtype=tf.float32, name='learning_rate') \\n return Input, Targets, LearningRate\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_get_inputs(get_inputs)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Build RNN Cell and Initialize\\nStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).\\n- The Rnn size should be set using `rnn_size`\\n- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function\\n - Apply the name \\\"initial_state\\\" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\\n\\nReturn the cell and initial state in the following tuple `(Cell, InitialState)`\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def get_init_cell(batch_size, rnn_size):\\n \\\"\\\"\\\"\\n Create an RNN Cell and initialize it.\\n :param batch_size: Size of batches\\n :param rnn_size: Size of RNNs\\n :return: Tuple (cell, initialize state)\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n #rnn_layers = 
2\\n \\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\\n Cell = tf.contrib.rnn.MultiRNNCell([lstm])\\n #initial_state = Cell.zero_state(batch_size=tf.placeholder(dtype=tf.int32, shape=[]), dtype=tf.float32)\\n InitialState = tf.identity(Cell.zero_state(batch_size, tf.float32), name = 'initial_state')\\n #InitialState = tf.identity(initial_state, name='initial_state')\\n return Cell, InitialState\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_get_init_cell(get_init_cell)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Word Embedding\\nApply embedding to `input_data` using TensorFlow. Return the embedded sequence.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def get_embed(input_data, vocab_size, embed_dim):\\n \\\"\\\"\\\"\\n Create embedding for .\\n :param input_data: TF placeholder for text input.\\n :param vocab_size: Number of words in vocabulary.\\n :param embed_dim: Number of embedding dimensions\\n :return: Embedded input.\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\\n embed = tf.nn.embedding_lookup(embedding, input_data)\\n return embed\\n\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_get_embed(get_embed)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Build RNN\\nYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.\\n- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)\\n - Apply the name \\\"final_state\\\" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\\n\\nReturn the outputs and final_state state in the following tuple `(Outputs, FinalState)` \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def build_rnn(cell, inputs):\\n \\\"\\\"\\\"\\n Create a RNN using a RNN Cell\\n :param cell: RNN Cell\\n :param inputs: Input text data\\n :return: Tuple (Outputs, Final State)\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n Outputs, Final_State = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\\n FinalState = tf.identity(Final_State, name='final_state')\\n return Outputs, FinalState\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_build_rnn(build_rnn)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Build the Neural Network\\nApply the functions you implemented above to:\\n- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.\\n- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.\\n- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.\\n\\nReturn the logits and final state in the following tuple (Logits, FinalState) \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):\\n \\\"\\\"\\\"\\n Build part of the neural network\\n :param cell: RNN cell\\n :param rnn_size: Size of rnns\\n :param input_data: Input data\\n :param vocab_size: Vocabulary size\\n :param embed_dim: Number of embedding dimensions\\n :return: Tuple (Logits, FinalState)\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n embedding = get_embed(input_data, vocab_size, embed_dim)\\n Outputs, FinalState = build_rnn(cell, embedding)\\n Logits = tf.contrib.layers.fully_connected(Outputs, vocab_size, activation_fn=None)\\n return Logits, 
FinalState\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_build_nn(build_nn)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Batches\\nImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:\\n- The first element is a single batch of **input** with the shape `[batch size, sequence length]`\\n- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`\\n\\nIf you can't fill the last batch with enough data, drop the last batch.\\n\\nFor exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:\\n```\\n[\\n # First Batch\\n [\\n # Batch of Input\\n [[ 1 2], [ 7 8], [13 14]]\\n # Batch of targets\\n [[ 2 3], [ 8 9], [14 15]]\\n ]\\n\\n # Second Batch\\n [\\n # Batch of Input\\n [[ 3 4], [ 9 10], [15 16]]\\n # Batch of targets\\n [[ 4 5], [10 11], [16 17]]\\n ]\\n\\n # Third Batch\\n [\\n # Batch of Input\\n [[ 5 6], [11 12], [17 18]]\\n # Batch of targets\\n [[ 6 7], [12 13], [18 1]]\\n ]\\n]\\n```\\n\\nNotice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def get_batches(int_text, batch_size, seq_length):\\n \\\"\\\"\\\"\\n Return batches of input and target\\n :param int_text: Text with the words replaced by their ids\\n :param batch_size: The size of batch\\n :param seq_length: The length of sequence\\n :return: Batches as a Numpy array\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n n_batches = len(int_text)//(batch_size*seq_length)\\n input_batch = np.array(int_text[0: n_batches * batch_size * seq_length])\\n target_batch = np.array(int_text[1: n_batches * batch_size * seq_length])\\n target_batch = np.append(target_batch, int_text[0])\\n \\n input_batchs = np.split(input_batch.reshape(batch_size, -1), n_batches, 1)\\n target_batchs = np.split(target_batch.reshape(batch_size, -1), n_batches, 1)\\n \\n get_batches = list(zip(input_batchs, target_batchs))\\n return np.array(get_batches)\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_get_batches(get_batches)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"## Neural Network Training\\n### Hyperparameters\\nTune the following parameters:\\n\\n- Set `num_epochs` to the number of epochs.\\n- Set `batch_size` to the batch size.\\n- Set `rnn_size` to the size of the RNNs.\\n- Set `embed_dim` to the size of the embedding.\\n- Set `seq_length` to the length of sequence.\\n- Set `learning_rate` to the learning rate.\\n- Set `show_every_n_batches` to the number of batches the neural network should print progress.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Number of Epochs\\nnum_epochs = 100\\n# Batch Size\\nbatch_size = 156\\n# RNN Size\\nrnn_size = 600\\n# Embedding Dimension Size\\nembed_dim = 500\\n# Sequence Length\\nseq_length = 14\\n# Learning Rate\\nlearning_rate = 0.001\\n# Show stats for every n number of batches\\nshow_every_n_batches = 100\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\nsave_dir = './save'\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Build the 
Graph\\nBuild the graph using the neural network you implemented.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nfrom tensorflow.contrib import seq2seq\\n\\ntrain_graph = tf.Graph()\\nwith train_graph.as_default():\\n vocab_size = len(int_to_vocab)\\n input_text, targets, lr = get_inputs()\\n input_data_shape = tf.shape(input_text)\\n cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)\\n logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)\\n\\n # Probabilities for generating words\\n probs = tf.nn.softmax(logits, name='probs')\\n\\n # Loss function\\n cost = seq2seq.sequence_loss(\\n logits,\\n targets,\\n tf.ones([input_data_shape[0], input_data_shape[1]]))\\n\\n # Optimizer\\n optimizer = tf.train.AdamOptimizer(lr)\\n\\n # Gradient Clipping\\n gradients = optimizer.compute_gradients(cost)\\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\\n train_op = optimizer.apply_gradients(capped_gradients)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Train\\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forms](https://discussions.udacity.com/) to see if anyone is having the same problem.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nbatches = get_batches(int_text, batch_size, seq_length)\\n\\nwith tf.Session(graph=train_graph) as sess:\\n sess.run(tf.global_variables_initializer())\\n\\n for epoch_i in range(num_epochs):\\n state = sess.run(initial_state, {input_text: batches[0][0]})\\n\\n for batch_i, (x, y) in enumerate(batches):\\n feed = {\\n input_text: x,\\n targets: y,\\n initial_state: state,\\n lr: learning_rate}\\n train_loss, state, _ = sess.run([cost, final_state, train_op], feed)\\n\\n # Show every batches\\n if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\\n print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\\n epoch_i,\\n batch_i,\\n len(batches),\\n train_loss))\\n\\n # Save Model\\n saver = tf.train.Saver()\\n saver.save(sess, save_dir)\\n print('Model Trained and Saved')\",\n \"Epoch 0 Batch 0/31 train_loss = 8.825\\nEpoch 3 Batch 7/31 train_loss = 5.159\\nEpoch 6 Batch 14/31 train_loss = 4.528\\nEpoch 9 Batch 21/31 train_loss = 4.046\\nEpoch 12 Batch 28/31 train_loss = 3.626\\nEpoch 16 Batch 4/31 train_loss = 3.317\\nEpoch 19 Batch 11/31 train_loss = 3.031\\nEpoch 22 Batch 18/31 train_loss = 2.765\\nEpoch 25 Batch 25/31 train_loss = 2.474\\nEpoch 29 Batch 1/31 train_loss = 2.178\\nEpoch 32 Batch 8/31 train_loss = 2.101\\nEpoch 35 Batch 15/31 train_loss = 1.774\\nEpoch 38 Batch 22/31 train_loss = 1.655\\nEpoch 41 Batch 29/31 train_loss = 1.581\\nEpoch 45 Batch 5/31 train_loss = 1.388\\nEpoch 48 Batch 12/31 train_loss = 1.260\\nEpoch 51 Batch 19/31 train_loss = 1.038\\nEpoch 54 Batch 26/31 train_loss = 1.010\\nEpoch 58 Batch 2/31 train_loss = 0.891\\nEpoch 61 Batch 9/31 train_loss = 0.773\\nEpoch 64 Batch 16/31 train_loss = 0.718\\nEpoch 67 Batch 23/31 train_loss = 0.642\\nEpoch 70 Batch 30/31 train_loss = 0.591\\nEpoch 74 Batch 6/31 train_loss = 0.534\\nEpoch 77 Batch 13/31 train_loss = 0.482\\nEpoch 80 Batch 20/31 train_loss = 0.438\\nEpoch 83 Batch 27/31 train_loss = 0.359\\nEpoch 87 Batch 3/31 train_loss = 0.369\\nEpoch 90 Batch 10/31 train_loss = 0.338\\nEpoch 93 Batch 17/31 train_loss = 0.300\\nEpoch 96 Batch 24/31 train_loss = 
0.291\\nModel Trained and Saved\\n\"\n ]\n ],\n [\n [\n \"## Save Parameters\\nSave `seq_length` and `save_dir` for generating a new TV script.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\n# Save parameters for checkpoint\\nhelper.save_params((seq_length, save_dir))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Checkpoint\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL\\n\\\"\\\"\\\"\\nimport tensorflow as tf\\nimport numpy as np\\nimport helper\\nimport problem_unittests as tests\\n\\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\\nseq_length, load_dir = helper.load_params()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Implement Generate Functions\\n### Get Tensors\\nGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:\\n- \\\"input:0\\\"\\n- \\\"initial_state:0\\\"\\n- \\\"final_state:0\\\"\\n- \\\"probs:0\\\"\\n\\nReturn the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)` \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def get_tensors(loaded_graph):\\n \\\"\\\"\\\"\\n Get input, initial state, final state, and probabilities tensor from \\n :param loaded_graph: TensorFlow graph loaded from file\\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n with loaded_graph.as_default() as g:\\n InputTensor = loaded_graph.get_tensor_by_name(\\\"input:0\\\")\\n InitialStateTensor = loaded_graph.get_tensor_by_name(\\\"initial_state:0\\\")\\n FinalStateTensor = loaded_graph.get_tensor_by_name(\\\"final_state:0\\\")\\n ProbsTensor = loaded_graph.get_tensor_by_name(\\\"probs:0\\\")\\n return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor\\n\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_get_tensors(get_tensors)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"### Choose Word\\nImplement the `pick_word()` function to select the next word using `probabilities`.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def pick_word(probabilities, int_to_vocab):\\n \\\"\\\"\\\"\\n Pick the next word in the generated text\\n :param probabilities: Probabilites of the next word\\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\\n :return: String of the predicted word\\n \\\"\\\"\\\"\\n # TODO: Implement Function\\n pick_word = np.random.choice(len(int_to_vocab), 1, p=probabilities)[0]\\n pick_word = int_to_vocab.get(pick_word)\\n return pick_word\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\ntests.test_pick_word(pick_word)\",\n \"Tests Passed\\n\"\n ]\n ],\n [\n [\n \"## Generate TV Script\\nThis will generate the TV script for you. 
Set `gen_length` to the length of TV script you want to generate.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"gen_length = 200\\n# homer_simpson, moe_szyslak, or Barney_Gumble\\nprime_word = 'moe_szyslak'\\n\\n\\\"\\\"\\\"\\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\\n\\\"\\\"\\\"\\nloaded_graph = tf.Graph()\\nwith tf.Session(graph=loaded_graph) as sess:\\n # Load saved model\\n loader = tf.train.import_meta_graph(load_dir + '.meta')\\n loader.restore(sess, load_dir)\\n\\n # Get Tensors from loaded model\\n input_text, initial_state, final_state, probs = get_tensors(loaded_graph)\\n\\n # Sentences generation setup\\n gen_sentences = [prime_word + ':']\\n prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\\n\\n # Generate sentences\\n for n in range(gen_length):\\n # Dynamic Input\\n dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\\n dyn_seq_length = len(dyn_input[0])\\n\\n # Get Prediction\\n probabilities, prev_state = sess.run(\\n [probs, final_state],\\n {input_text: dyn_input, initial_state: prev_state})\\n \\n pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)\\n\\n gen_sentences.append(pred_word)\\n \\n # Remove tokens\\n tv_script = ' '.join(gen_sentences)\\n for key, token in token_dict.items():\\n ending = ' ' if key in ['\\\\n', '(', '\\\"'] else ''\\n tv_script = tv_script.replace(' ' + token.lower(), key)\\n tv_script = tv_script.replace('\\\\n ', '\\\\n')\\n tv_script = tv_script.replace('( ', '(')\\n \\n print(tv_script)\",\n \"moe_szyslak: ah-ha, big mistake pal! hey moe, can you be the best book on you could never!\\nhomer_simpson:(getting idea) but you're dea-d-d-dead.(three stooges scared sound)\\ngrampa_simpson:(upbeat) i guess despite all sweet music, but then we pour it a beer at half something.\\n\\n\\nlenny_leonard: hey, homer. r.\\nhomer_simpson: moe, it's called!\\nmoe_szyslak: guys, i'm gonna let him want to go to my dad\\n\\n\\nhomer_simpson:(to moe) thirty cases of cough syrup. sign in the way.\\nbarney_gumble: yeah, that's probably what i look at you, i'm too?\\nmoe_szyslak: oh, here. the audience is still love over.\\nmoe's_thoughts: this is kent brockman. and it begins,\\\" dear is to that!\\nmoe_szyslak:(laughs) if you want to be back.\\nvoice: excuse me, so you can either sit here in the back of my cruiser.\\nhomer_simpson: well if i only got their secrets.\\nlenny_leonard:(amiable) amanda\\n\"\n ]\n ],\n [\n [\n \"# The TV Script is Nonsensical\\nIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckly there's more data! As we mentioned in the begging of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.\\n# Submitting This Project\\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \\\"dlnd_tv_script_generation.ipynb\\\" and save it as a HTML file under \\\"File\\\" -> \\\"Download as\\\". 
Include the \\\"helper.py\\\" and \\\"problem_unittests.py\\\" files in your submission.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459032,"cells":{"hexsha":{"kind":"string","value":"e7ee48fb8979200b2b5204eff5c72942a392b09e"},"size":{"kind":"number","value":51829,"string":"51,829"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"house_prices/analysis12.ipynb"},"max_stars_repo_name":{"kind":"string","value":"randat9/House_Prices"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3a7e51e1ac36aea0faabc61786652cf706b53c7e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"house_prices/analysis12.ipynb"},"max_issues_repo_name":{"kind":"string","value":"randat9/House_Prices"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3a7e51e1ac36aea0faabc61786652cf706b53c7e"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"house_prices/analysis12.ipynb"},"max_forks_repo_name":{"kind":"string","value":"randat9/House_Prices"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3a7e51e1ac36aea0faabc61786652cf706b53c7e"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":80.730529595,"string":"80.73053"},"max_line_length":{"kind":"number","value":6686,"string":"6,686"},"alphanum_fraction":{"kind":"number","value":0.4474521986,"string":"0.447452"},"cells":{"kind":"list like","value":[[["import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas_profiling as pp\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom functions.preprocessing import Imputer, CategoricalEncoder, remove_outliers\n\nfrom lazypredict.Supervised import LazyRegressor\n\nplt.style.use('ggplot')","_____no_output_____"],["def remove_empty_features(data, threshold):\n \"\"\"...\"\"\"\n cols_to_drop = [column for column in data.columns \n if data[column].isna().mean() > threshold]\n data = data.drop(columns = cols_to_drop)\n return data, cols_to_drop\n\ndef mapping_from_list(order):\n return {label: idx for idx, label in enumerate(order)}\n\ndef ordinal_feature(data: pd.DataFrame, dictionary: dict):\n \"\"\" Transform ordinal features\n\n Args:\n data (dataframe)\n dictionary (dict)\n\n Returns:\n data (dataframe): encoded dataframe\n \"\"\"\n data_copy = data.copy()\n for key,value in dictionary.items():\n data_copy[key] = data_copy[key].map(mapping_from_list(value))\n\n return data_copy","_____no_output_____"],["# Road raw training data\nraw_data = pd.read_csv('train.csv', index_col=0)\nraw_data.head(5)","_____no_output_____"],["options = {\n \"MSSubClass\": {\"strategy\": \"most_frequent\"},\n \"MSZoning\": {\"strategy\": \"most_frequent\"},\n \"LotFrontage\": {\"strategy\": \"mean\"},\n \"LotArea\": {\"strategy\": \"mean\"},\n \"Street\": {\"strategy\": \"most_frequent\"},\n \"Alley\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"LotShape\": {\"strategy\": \"most_frequent\"},\n \"LandContour\": {\"strategy\": \"most_frequent\"},\n \"Utilities\": {\"strategy\": \"most_frequent\"},\n \"LotConfig\": {\"strategy\": \"most_frequent\"},\n \"LandSlope\": {\"strategy\": \"most_frequent\"},\n \"Neighborhood\": {\"strategy\": \"most_frequent\"},\n \"Condition1\": {\"strategy\": \"most_frequent\"},\n \"Condition2\": {\"strategy\": \"most_frequent\"},\n \"Electrical\": {\"strategy\": \"most_frequent\"},\n \"1stFlrSF\": {\"strategy\": \"mean\"},\n \"2ndFlrSF\": {\"strategy\": 
\"mean\"},\n \"LowQualFinSF\": {\"strategy\": \"mean\"},\n \"GrLivArea\": {\"strategy\": \"mean\"},\n \"BsmtFullBath\": {\"strategy\": \"median\"},\n \"BsmtHalfBath\": {\"strategy\": \"median\"},\n \"FullBath\": {\"strategy\": \"median\"},\n \"HalfBath\": {\"strategy\": \"median\"},\n \"BedroomAbvGr\": {\"strategy\": \"median\"},\n \"KitchenAbvGr\": {\"strategy\": \"median\"},\n \"KitchenQual\": {\"strategy\": \"most_frequent\"},\n \"TotRmsAbvGrd\": {\"strategy\": \"median\"},\n \"BldgType\": {\"strategy\": \"most_frequent\"},\n \"HouseStyle\": {\"strategy\": \"most_frequent\"},\n \"OverallQual\": {\"strategy\": \"median\"},\n \"OverallCond\": {\"strategy\": \"median\"},\n \"YearBuilt\": {\"strategy\": \"median\"},\n \"YearRemodAdd\": {\"strategy\": \"median\"},\n \"RoofStyle\": {\"strategy\": \"most_frequent\"},\n \"RoofMatl\": {\"strategy\": \"most_frequent\"},\n \"Exterior1st\": {\"strategy\": \"most_frequent\"},\n \"Exterior2nd\": {\"strategy\": \"most_frequent\"},\n \"MasVnrType\": {\"strategy\": \"constant\", \"fill_value\": \"None\"},\n \"MasVnrArea\": {\"strategy\": \"mean\"},\n \"ExterQual\": {\"strategy\": \"most_frequent\"},\n \"ExterCond\": {\"strategy\": \"most_frequent\"},\n \"Foundation\": {\"strategy\": \"most_frequent\"},\n \"BsmtQual\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtCond\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtExposure\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtFinType1\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtFinSF1\": {\"strategy\": \"mean\"},\n \"BsmtFinType2\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtFinSF2\": {\"strategy\": \"mean\"},\n \"BsmtUnfSF\": {\"strategy\": \"mean\"},\n \"TotalBsmtSF\": {\"strategy\": \"mean\"},\n \"Heating\": {\"strategy\": \"most_frequent\"},\n \"HeatingQC\": {\"strategy\": \"most_frequent\"},\n \"CentralAir\": {\"strategy\": \"most_frequent\"},\n \"ScreenPorch\": {\"strategy\": \"mean\"},\n \"PoolArea\": {\"strategy\": \"mean\"},\n \"PoolQC\": {\"strategy\": \"constant\", \"fill_value\": \"NoPool\"},\n \"Fence\": {\"strategy\": \"constant\", \"fill_value\": \"NoFence\"},\n \"MiscFeature\": {\"strategy\": \"constant\", \"fill_value\": \"None\"},\n \"MiscVal\": {\"strategy\": \"mean\"},\n \"MoSold\": {\"strategy\": \"median\"},\n \"YrSold\": {\"strategy\": \"median\"},\n \"SaleType\": {\"strategy\": \"most_frequent\"},\n \"SaleCondition\": {\"strategy\": \"most_frequent\"},\n \"Functional\": {\"strategy\": \"most_frequent\"},\n \"Fireplaces\": {\"strategy\": \"most_frequent\"},\n \"FireplaceQu\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageType\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageYrBlt\": {\"strategy\": \"most_frequent\"},\n \"GarageFinish\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageCars\": {\"strategy\": \"most_frequent\"},\n \"GarageArea\": {\"strategy\": \"median\"},\n \"GarageQual\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageCond\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"PavedDrive\": {\"strategy\": \"most_frequent\"},\n \"WoodDeckSF\": {\"strategy\": \"most_frequent\"},\n \"OpenPorchSF\": {\"strategy\": \"most_frequent\"},\n \"EnclosedPorch\": {\"strategy\": \"mean\"},\n \"3SsnPorch\": {\"strategy\": \"most_frequent\"},\n}","_____no_output_____"],["params = {\n \"threshold_empty_features\": 0.3,\n}\n\ncols_to_drop = {\n 
\"remove_empty_features\": []\n}\n\ncategorical_colums = ['Exterior1st', 'Foundation', 'MasVnrType', 'Neighborhood', \n 'PavedDrive', 'Electrical', 'MSSubClass', 'SaleCondition',\n 'GarageType', 'Exterior2nd', 'MSZoning', 'CentralAir', \n 'Street','Alley','LandContour','Utilities','LotConfig', 'LandSlope', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'BsmtFinType2', 'Heating', 'Functional', 'GarageCond', 'Fence', 'MiscFeature', 'SaleType']\n\n# Ordinal features options\nordinal_columns = ['HeatingQC', 'GarageQual', 'BsmtFinType1', 'ExterQual', \n 'GarageFinish', 'BsmtExposure', 'LotShape', 'OverallQual',\n 'BsmtQual', 'KitchenQual']\n\nordinal_mapping = {\n 'BsmtExposure': ['None', 'No', 'Mn', 'Av', 'Gd'],\n 'BsmtFinType1': ['None', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'],\n 'GarageFinish': ['None', 'Unf', 'RFn', 'Fin'],\n 'LotShape': ['IR3', 'IR2', 'IR1', 'Reg']\n}\n\nordinal_common = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'PoolQC']\nfor column in ordinal_common:\n ordinal_mapping[column] = ['None', 'Po', 'Fa', 'TA', 'Gd']","_____no_output_____"],["# Removing features with a lot of missing values\ndata, cols_to_drop[\"remove_empty_features\"] = remove_empty_features(\n raw_data, \n params[\"threshold_empty_features\"]\n)\n\n# Impute missing values\nimp = Imputer(options=options)\ndata = imp.fit_transform(raw_data)\n\n# HOTFIX\nfor key in imp.options:\n if isinstance(imp.options[key]['_fill'], np.integer):\n imp.options[key]['_fill'] = int(imp.options[key]['_fill'])\nimp.save_options('imputer_options.json')\n\n# Encoding categorical features\nce = CategoricalEncoder(categorical_colums)\ndata = ce.fit_transform(data)\n\n# Encoding ordinal features\ndata = ordinal_feature(data, ordinal_mapping)\n\n# data\ndata","_____no_output_____"]],[["## Model metrics before removing outliers","_____no_output_____"]],[["reg = LazyRegressor()\nX = data.drop(columns = [\"SalePrice\"])\ny = data[\"SalePrice\"]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42)\nmodels, _ = reg.fit(X_train, X_test, y_train, y_test)\nmodels","100%|██████████| 43/43 [00:36<00:00, 1.19it/s]\n"]],[["## Removing outliers","_____no_output_____"]],[["nan_columns = {column: data[column].isna().sum() for column in data.columns if data[column].isna().sum() > 0}\nnan_columns","_____no_output_____"],["data[\"PoolQC\"].sample(10)","_____no_output_____"],["ordinal_common = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'PoolQC']","_____no_output_____"],["outlier_removed_data = remove_outliers(data_no_empty_features, method=\"IsolationForest\", threshold=0.1, model_kwargs = {})","Model to detect outliers is IsolationForest with parameters {}\n"],["reg = LazyRegressor()\nX = outlier_removed_data.drop(columns = [\"SalePrice\"])\ny = outlier_removed_data[\"SalePrice\"]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\nmodels, _ = reg.fit(X_train, X_test, y_train, y_test)\nmodels","_____no_output_____"]],[["## TODO:\n\n- Krzysiek:\n - funkcje zwracają indeksy i kolumny\n \n- kbdev\n - Encoding ordinal features as a class\n - fix np.int64 bug in json serialization\n - \n \n- miri\n - nie będzie jej (na 50%)\n \n- Patryk\n - zapis do pliku Encoder, konstruktor z pliku\n - PR \n \n```python\nour_encoder = OurOneHotEncoder(columns=...)\ndata = our_encoder.fit(data)\nour_encoder.save(file.json)\n 
\nour_encoder.from_file(file.json)\nour_encoder.transform(other_data)\n```\n","_____no_output_____"]]],"string":"[\n [\n [\n \"import numpy as np\\nimport pandas as pd\\nimport matplotlib.pyplot as plt\\nimport matplotlib as mpl\\nimport pandas_profiling as pp\\nimport seaborn as sns\\n\\nfrom sklearn.model_selection import train_test_split\\nfrom sklearn.preprocessing import OneHotEncoder\\n\\nfrom functions.preprocessing import Imputer, CategoricalEncoder, remove_outliers\\n\\nfrom lazypredict.Supervised import LazyRegressor\\n\\nplt.style.use('ggplot')\",\n \"_____no_output_____\"\n ],\n [\n \"def remove_empty_features(data, threshold):\\n \\\"\\\"\\\"...\\\"\\\"\\\"\\n cols_to_drop = [column for column in data.columns \\n if data[column].isna().mean() > threshold]\\n data = data.drop(columns = cols_to_drop)\\n return data, cols_to_drop\\n\\ndef mapping_from_list(order):\\n return {label: idx for idx, label in enumerate(order)}\\n\\ndef ordinal_feature(data: pd.DataFrame, dictionary: dict):\\n \\\"\\\"\\\" Transform ordinal features\\n\\n Args:\\n data (dataframe)\\n dictionary (dict)\\n\\n Returns:\\n data (dataframe): encoded dataframe\\n \\\"\\\"\\\"\\n data_copy = data.copy()\\n for key,value in dictionary.items():\\n data_copy[key] = data_copy[key].map(mapping_from_list(value))\\n\\n return data_copy\",\n \"_____no_output_____\"\n ],\n [\n \"# Road raw training data\\nraw_data = pd.read_csv('train.csv', index_col=0)\\nraw_data.head(5)\",\n \"_____no_output_____\"\n ],\n [\n \"options = {\\n \\\"MSSubClass\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"MSZoning\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"LotFrontage\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"LotArea\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"Street\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Alley\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoAccess\\\"},\\n \\\"LotShape\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"LandContour\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Utilities\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"LotConfig\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"LandSlope\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Neighborhood\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Condition1\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Condition2\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Electrical\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"1stFlrSF\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"2ndFlrSF\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"LowQualFinSF\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"GrLivArea\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"BsmtFullBath\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"BsmtHalfBath\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"FullBath\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"HalfBath\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"BedroomAbvGr\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"KitchenAbvGr\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"KitchenQual\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"TotRmsAbvGrd\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"BldgType\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"HouseStyle\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"OverallQual\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"OverallCond\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"YearBuilt\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"YearRemodAdd\\\": 
{\\\"strategy\\\": \\\"median\\\"},\\n \\\"RoofStyle\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"RoofMatl\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Exterior1st\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Exterior2nd\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"MasVnrType\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"None\\\"},\\n \\\"MasVnrArea\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"ExterQual\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"ExterCond\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Foundation\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"BsmtQual\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoBasement\\\"},\\n \\\"BsmtCond\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoBasement\\\"},\\n \\\"BsmtExposure\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoBasement\\\"},\\n \\\"BsmtFinType1\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoBasement\\\"},\\n \\\"BsmtFinSF1\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"BsmtFinType2\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoBasement\\\"},\\n \\\"BsmtFinSF2\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"BsmtUnfSF\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"TotalBsmtSF\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"Heating\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"HeatingQC\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"CentralAir\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"ScreenPorch\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"PoolArea\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"PoolQC\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoPool\\\"},\\n \\\"Fence\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoFence\\\"},\\n \\\"MiscFeature\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"None\\\"},\\n \\\"MiscVal\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"MoSold\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"YrSold\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"SaleType\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"SaleCondition\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Functional\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"Fireplaces\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"FireplaceQu\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoAccess\\\"},\\n \\\"GarageType\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoAccess\\\"},\\n \\\"GarageYrBlt\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"GarageFinish\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoAccess\\\"},\\n \\\"GarageCars\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"GarageArea\\\": {\\\"strategy\\\": \\\"median\\\"},\\n \\\"GarageQual\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoAccess\\\"},\\n \\\"GarageCond\\\": {\\\"strategy\\\": \\\"constant\\\", \\\"fill_value\\\": \\\"NoAccess\\\"},\\n \\\"PavedDrive\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"WoodDeckSF\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"OpenPorchSF\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n \\\"EnclosedPorch\\\": {\\\"strategy\\\": \\\"mean\\\"},\\n \\\"3SsnPorch\\\": {\\\"strategy\\\": \\\"most_frequent\\\"},\\n}\",\n \"_____no_output_____\"\n ],\n [\n \"params = {\\n \\\"threshold_empty_features\\\": 0.3,\\n}\\n\\ncols_to_drop = {\\n 
\\\"remove_empty_features\\\": []\\n}\\n\\ncategorical_colums = ['Exterior1st', 'Foundation', 'MasVnrType', 'Neighborhood', \\n 'PavedDrive', 'Electrical', 'MSSubClass', 'SaleCondition',\\n 'GarageType', 'Exterior2nd', 'MSZoning', 'CentralAir', \\n 'Street','Alley','LandContour','Utilities','LotConfig', 'LandSlope', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'BsmtFinType2', 'Heating', 'Functional', 'GarageCond', 'Fence', 'MiscFeature', 'SaleType']\\n\\n# Ordinal features options\\nordinal_columns = ['HeatingQC', 'GarageQual', 'BsmtFinType1', 'ExterQual', \\n 'GarageFinish', 'BsmtExposure', 'LotShape', 'OverallQual',\\n 'BsmtQual', 'KitchenQual']\\n\\nordinal_mapping = {\\n 'BsmtExposure': ['None', 'No', 'Mn', 'Av', 'Gd'],\\n 'BsmtFinType1': ['None', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'],\\n 'GarageFinish': ['None', 'Unf', 'RFn', 'Fin'],\\n 'LotShape': ['IR3', 'IR2', 'IR1', 'Reg']\\n}\\n\\nordinal_common = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'PoolQC']\\nfor column in ordinal_common:\\n ordinal_mapping[column] = ['None', 'Po', 'Fa', 'TA', 'Gd']\",\n \"_____no_output_____\"\n ],\n [\n \"# Removing features with a lot of missing values\\ndata, cols_to_drop[\\\"remove_empty_features\\\"] = remove_empty_features(\\n raw_data, \\n params[\\\"threshold_empty_features\\\"]\\n)\\n\\n# Impute missing values\\nimp = Imputer(options=options)\\ndata = imp.fit_transform(raw_data)\\n\\n# HOTFIX\\nfor key in imp.options:\\n if isinstance(imp.options[key]['_fill'], np.integer):\\n imp.options[key]['_fill'] = int(imp.options[key]['_fill'])\\nimp.save_options('imputer_options.json')\\n\\n# Encoding categorical features\\nce = CategoricalEncoder(categorical_colums)\\ndata = ce.fit_transform(data)\\n\\n# Encoding ordinal features\\ndata = ordinal_feature(data, ordinal_mapping)\\n\\n# data\\ndata\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Model metrics before removing outliers\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"reg = LazyRegressor()\\nX = data.drop(columns = [\\\"SalePrice\\\"])\\ny = data[\\\"SalePrice\\\"]\\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42)\\nmodels, _ = reg.fit(X_train, X_test, y_train, y_test)\\nmodels\",\n \"100%|██████████| 43/43 [00:36<00:00, 1.19it/s]\\n\"\n ]\n ],\n [\n [\n \"## Removing outliers\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"nan_columns = {column: data[column].isna().sum() for column in data.columns if data[column].isna().sum() > 0}\\nnan_columns\",\n \"_____no_output_____\"\n ],\n [\n \"data[\\\"PoolQC\\\"].sample(10)\",\n \"_____no_output_____\"\n ],\n [\n \"ordinal_common = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'PoolQC']\",\n \"_____no_output_____\"\n ],\n [\n \"outlier_removed_data = remove_outliers(data_no_empty_features, method=\\\"IsolationForest\\\", threshold=0.1, model_kwargs = {})\",\n \"Model to detect outliers is IsolationForest with parameters {}\\n\"\n ],\n [\n \"reg = LazyRegressor()\\nX = outlier_removed_data.drop(columns = [\\\"SalePrice\\\"])\\ny = outlier_removed_data[\\\"SalePrice\\\"]\\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\\nmodels, _ = reg.fit(X_train, X_test, y_train, y_test)\\nmodels\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## TODO:\\n\\n- Krzysiek:\\n - funkcje zwracają indeksy i kolumny\\n \\n- kbdev\\n - Encoding ordinal 
features as a class\\n - fix np.int64 bug in json serialization\\n - \\n \\n- miri\\n - nie będzie jej (na 50%)\\n \\n- Patryk\\n - zapis do pliku Encoder, konstruktor z pliku\\n - PR \\n \\n```python\\nour_encoder = OurOneHotEncoder(columns=...)\\ndata = our_encoder.fit(data)\\nour_encoder.save(file.json)\\n \\nour_encoder.from_file(file.json)\\nour_encoder.transform(other_data)\\n```\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code","markdown","code","markdown"],"string":"[\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code"],["markdown"],["code"],["markdown"],["code","code","code","code","code"],["markdown"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459033,"cells":{"hexsha":{"kind":"string","value":"e7ee4c0b6ac20952088caee5c81d8ba2416d4de1"},"size":{"kind":"number","value":32382,"string":"32,382"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Big_Dreams.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Lore8614/Lore8614.github.io"},"max_stars_repo_head_hexsha":{"kind":"string","value":"492cbdf0e443d5ffc1fbddc079ca3dc301c14485"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Big_Dreams.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Lore8614/Lore8614.github.io"},"max_issues_repo_head_hexsha":{"kind":"string","value":"492cbdf0e443d5ffc1fbddc079ca3dc301c14485"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Big_Dreams.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Lore8614/Lore8614.github.io"},"max_forks_repo_head_hexsha":{"kind":"string","value":"492cbdf0e443d5ffc1fbddc079ca3dc301c14485"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-12-04T19:31:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-12-04T19:31:26.000Z"},"avg_line_length":{"kind":"number","value":36.5485327314,"string":"36.548533"},"max_line_length":{"kind":"number","value":236,"string":"236"},"alphanum_fraction":{"kind":"number","value":0.2655178803,"string":"0.265518"},"cells":{"kind":"list like","value":[[["\"Open","_____no_output_____"]],[["import pandas as pd","_____no_output_____"],["from google.colab import drive\ndrive.mount('/content/drive')","Mounted at /content/drive\n"],["%pwd\n%ls '/content/drive/My Drive/Machine Learning Final'","ls: cannot access '/content/drive/My Drive/Machine Learning Final': No such file or directory\n"],["pos_muts = pd.read_csv('/content/drive/My Drive/Machine Learning 
Final/H77_metadata.csv')\nfreqs = pd.read_csv('/content/drive/My Drive/Machine Learning Final/HCV1a_TsMutFreq_195.csv')\nmut_rate = pd.read_csv('/content/drive/My Drive/Machine Learning Final/Geller.mutation.rates_update.csv')\nfreqs.head()\n","_____no_output_____"],["mut_rate.head()","_____no_output_____"],["pos_muts.head()","_____no_output_____"],["# Start Calculating costs","_____no_output_____"]]],"string":"[\n [\n [\n \"\\\"Open\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\",\n \"_____no_output_____\"\n ],\n [\n \"from google.colab import drive\\ndrive.mount('/content/drive')\",\n \"Mounted at /content/drive\\n\"\n ],\n [\n \"%pwd\\n%ls '/content/drive/My Drive/Machine Learning Final'\",\n \"ls: cannot access '/content/drive/My Drive/Machine Learning Final': No such file or directory\\n\"\n ],\n [\n \"pos_muts = pd.read_csv('/content/drive/My Drive/Machine Learning Final/H77_metadata.csv')\\nfreqs = pd.read_csv('/content/drive/My Drive/Machine Learning Final/HCV1a_TsMutFreq_195.csv')\\nmut_rate = pd.read_csv('/content/drive/My Drive/Machine Learning Final/Geller.mutation.rates_update.csv')\\nfreqs.head()\\n\",\n \"_____no_output_____\"\n ],\n [\n \"mut_rate.head()\",\n \"_____no_output_____\"\n ],\n [\n \"pos_muts.head()\",\n \"_____no_output_____\"\n ],\n [\n \"# Start Calculating costs\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459034,"cells":{"hexsha":{"kind":"string","value":"e7ee57ccca319457adc283acd24079f764992616"},"size":{"kind":"number","value":8596,"string":"8,596"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebooks/cross_validation_nested.ipynb"},"max_stars_repo_name":{"kind":"string","value":"nish2612/scikit-learn-mooc"},"max_stars_repo_head_hexsha":{"kind":"string","value":"daa9945beddf3318ef20770bf44b77f1e747d7fa"},"max_stars_repo_licenses":{"kind":"list like","value":["CC-BY-4.0"],"string":"[\n \"CC-BY-4.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-06-05T01:22:12.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-06-05T01:22:12.000Z"},"max_issues_repo_path":{"kind":"string","value":"notebooks/cross_validation_nested.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Mamane403/scikit-learn-mooc"},"max_issues_repo_head_hexsha":{"kind":"string","value":"cdfe0e9ac16b5d7fa4c8fb343141c10eb98828f4"},"max_issues_repo_licenses":{"kind":"list like","value":["CC-BY-4.0"],"string":"[\n \"CC-BY-4.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"notebooks/cross_validation_nested.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Mamane403/scikit-learn-mooc"},"max_forks_repo_head_hexsha":{"kind":"string","value":"cdfe0e9ac16b5d7fa4c8fb343141c10eb98828f4"},"max_forks_repo_licenses":{"kind":"list like","value":["CC-BY-4.0"],"string":"[\n 
\"CC-BY-4.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":34.384,"string":"34.384"},"max_line_length":{"kind":"number","value":88,"string":"88"},"alphanum_fraction":{"kind":"number","value":0.6237785016,"string":"0.623779"},"cells":{"kind":"list like","value":[[["# Nested cross-validation\n\nIn this notebook, we show a pattern called **nested cross-validation** which\nshould be used when you want to both evaluate a model and tune the\nmodel's hyperparameters.\n\nCross-validation is a powerful tool to evaluate the statistical performance\nof a model. It is also used to select the best model from a pool of models.\nThis pool of models can be the same family of predictor but with different\nparameters. In this case, we call this procedure **hyperparameter tuning**.\n\nWe could also imagine that we would like to choose among heterogeneous models\nthat will similarly use the cross-validation.\n\nBefore we go into details regarding the nested cross-validation, we will\nfirst recall the pattern used to fine tune a model's hyperparameters.\n\nLet's load the breast cancer dataset.","_____no_output_____"]],[["from sklearn.datasets import load_breast_cancer\n\ndata, target = load_breast_cancer(return_X_y=True)","_____no_output_____"]],[["Now, we'll make a minimal example using the utility `GridSearchCV` to find\nthe best parameters via cross-validation.","_____no_output_____"]],[["from sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\nparam_grid = {\"C\": [0.1, 1, 10], \"gamma\": [.01, .1]}\nmodel_to_tune = SVC()\n\nsearch = GridSearchCV(estimator=model_to_tune, param_grid=param_grid,\n n_jobs=2)\nsearch.fit(data, target)","_____no_output_____"]],[["We recall that `GridSearchCV` will train a model with some specific parameter\non a training set and evaluate it on testing. However, this evaluation is\ndone via cross-validation using the `cv` parameter. This procedure is\nrepeated for all possible combinations of parameters given in `param_grid`.\n\nThe attribute `best_params_` will give us the best set of parameters that\nmaximize the mean score on the internal test sets.","_____no_output_____"]],[["print(f\"The best parameter found are: {search.best_params_}\")","_____no_output_____"]],[["We can now show the mean score obtained using the parameter `best_score_`.","_____no_output_____"]],[["print(f\"The mean score in CV is: {search.best_score_:.3f}\")","_____no_output_____"]],[["At this stage, one should be extremely careful using this score. The\nmisinterpretation would be the following: since the score was computed on a\ntest set, it could be considered our model's testing score.\n\nHowever, we should not forget that we used this score to pick-up the best\nmodel. It means that we used knowledge from the test set (i.e. test score) to\ndecide our model's training parameter.\n\nThus, this score is not a reasonable estimate of our testing error.\nIndeed, we can show that it will be too optimistic in practice. The good way\nis to use a \"nested\" cross-validation. We will use an inner cross-validation\ncorresponding to the previous procedure shown to optimize the\nhyperparameters. 
We will also include this procedure within an outer\ncross-validation, which will be used to estimate the testing error of\nour tuned model.\n\nIn this case, our inner cross-validation will always get the training set of\nthe outer cross-validation, making it possible to compute the testing\nscore on a completely independent set.\n\nWe will show below how we can create such nested cross-validation and obtain\nthe testing score.","_____no_output_____"]],[["from sklearn.model_selection import cross_val_score, KFold\n\n# Declare the inner and outer cross-validation\ninner_cv = KFold(n_splits=4, shuffle=True, random_state=0)\nouter_cv = KFold(n_splits=4, shuffle=True, random_state=0)\n\n# Inner cross-validation for parameter search\nmodel = GridSearchCV(\n estimator=model_to_tune, param_grid=param_grid, cv=inner_cv, n_jobs=2)\n\n# Outer cross-validation to compute the testing score\ntest_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2)\nprint(f\"The mean score using nested cross-validation is: \"\n f\"{test_score.mean():.3f} +/- {test_score.std():.3f}\")","_____no_output_____"]],[["In the example above, the reported score is more trustful and should be close\nto production's expected statistical performance.\n\nWe will illustrate the difference between the nested and non-nested\ncross-validation scores to show that the latter one will be too optimistic in\npractice. In this regard, we will repeat several time the experiment and\nshuffle the data differently. Besides, we will store the score obtain with\nand without the nested cross-validation.","_____no_output_____"]],[["test_score_not_nested = []\ntest_score_nested = []\n\nN_TRIALS = 20\nfor i in range(N_TRIALS):\n inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)\n outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)\n\n # Non_nested parameter search and scoring\n model = GridSearchCV(estimator=model_to_tune, param_grid=param_grid,\n cv=inner_cv, n_jobs=2)\n model.fit(data, target)\n test_score_not_nested.append(model.best_score_)\n\n # Nested CV with parameter optimization\n test_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2)\n test_score_nested.append(test_score.mean())","_____no_output_____"]],[["We can merge the data together and make a box plot of the two strategies.","_____no_output_____"]],[["import pandas as pd\n\nall_scores = {\n \"Not nested CV\": test_score_not_nested,\n \"Nested CV\": test_score_nested,\n}\nall_scores = pd.DataFrame(all_scores)","_____no_output_____"],["import matplotlib.pyplot as plt\n\ncolor = {\"whiskers\": \"black\", \"medians\": \"black\", \"caps\": \"black\"}\nall_scores.plot.box(color=color, vert=False)\nplt.xlabel(\"Accuracy\")\n_ = plt.title(\"Comparison of mean accuracy obtained on the test sets with\\n\"\n \"and without nested cross-validation\")","_____no_output_____"]],[["We observe that the model's statistical performance with the nested\ncross-validation is not as good as the non-nested cross-validation.\n\nAs a conclusion, when optimizing parts of the machine learning pipeline (e.g.\nhyperparameter, transform, etc.), one needs to use nested cross-validation to\nevaluate the statistical performance of the predictive model. 
Otherwise, the\nresults obtained without nested cross-validation are over-optimistic.","_____no_output_____"]]],"string":"[\n [\n [\n \"# Nested cross-validation\\n\\nIn this notebook, we show a pattern called **nested cross-validation** which\\nshould be used when you want to both evaluate a model and tune the\\nmodel's hyperparameters.\\n\\nCross-validation is a powerful tool to evaluate the statistical performance\\nof a model. It is also used to select the best model from a pool of models.\\nThis pool of models can be the same family of predictor but with different\\nparameters. In this case, we call this procedure **hyperparameter tuning**.\\n\\nWe could also imagine that we would like to choose among heterogeneous models\\nthat will similarly use the cross-validation.\\n\\nBefore we go into details regarding the nested cross-validation, we will\\nfirst recall the pattern used to fine tune a model's hyperparameters.\\n\\nLet's load the breast cancer dataset.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.datasets import load_breast_cancer\\n\\ndata, target = load_breast_cancer(return_X_y=True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now, we'll make a minimal example using the utility `GridSearchCV` to find\\nthe best parameters via cross-validation.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.model_selection import GridSearchCV\\nfrom sklearn.svm import SVC\\n\\nparam_grid = {\\\"C\\\": [0.1, 1, 10], \\\"gamma\\\": [.01, .1]}\\nmodel_to_tune = SVC()\\n\\nsearch = GridSearchCV(estimator=model_to_tune, param_grid=param_grid,\\n n_jobs=2)\\nsearch.fit(data, target)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We recall that `GridSearchCV` will train a model with some specific parameter\\non a training set and evaluate it on testing. However, this evaluation is\\ndone via cross-validation using the `cv` parameter. This procedure is\\nrepeated for all possible combinations of parameters given in `param_grid`.\\n\\nThe attribute `best_params_` will give us the best set of parameters that\\nmaximize the mean score on the internal test sets.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(f\\\"The best parameter found are: {search.best_params_}\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We can now show the mean score obtained using the parameter `best_score_`.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(f\\\"The mean score in CV is: {search.best_score_:.3f}\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"At this stage, one should be extremely careful using this score. The\\nmisinterpretation would be the following: since the score was computed on a\\ntest set, it could be considered our model's testing score.\\n\\nHowever, we should not forget that we used this score to pick-up the best\\nmodel. It means that we used knowledge from the test set (i.e. test score) to\\ndecide our model's training parameter.\\n\\nThus, this score is not a reasonable estimate of our testing error.\\nIndeed, we can show that it will be too optimistic in practice. The good way\\nis to use a \\\"nested\\\" cross-validation. We will use an inner cross-validation\\ncorresponding to the previous procedure shown to optimize the\\nhyperparameters. 
We will also include this procedure within an outer\\ncross-validation, which will be used to estimate the testing error of\\nour tuned model.\\n\\nIn this case, our inner cross-validation will always get the training set of\\nthe outer cross-validation, making it possible to compute the testing\\nscore on a completely independent set.\\n\\nWe will show below how we can create such nested cross-validation and obtain\\nthe testing score.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.model_selection import cross_val_score, KFold\\n\\n# Declare the inner and outer cross-validation\\ninner_cv = KFold(n_splits=4, shuffle=True, random_state=0)\\nouter_cv = KFold(n_splits=4, shuffle=True, random_state=0)\\n\\n# Inner cross-validation for parameter search\\nmodel = GridSearchCV(\\n estimator=model_to_tune, param_grid=param_grid, cv=inner_cv, n_jobs=2)\\n\\n# Outer cross-validation to compute the testing score\\ntest_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2)\\nprint(f\\\"The mean score using nested cross-validation is: \\\"\\n f\\\"{test_score.mean():.3f} +/- {test_score.std():.3f}\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In the example above, the reported score is more trustful and should be close\\nto production's expected statistical performance.\\n\\nWe will illustrate the difference between the nested and non-nested\\ncross-validation scores to show that the latter one will be too optimistic in\\npractice. In this regard, we will repeat several time the experiment and\\nshuffle the data differently. Besides, we will store the score obtain with\\nand without the nested cross-validation.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"test_score_not_nested = []\\ntest_score_nested = []\\n\\nN_TRIALS = 20\\nfor i in range(N_TRIALS):\\n inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)\\n outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)\\n\\n # Non_nested parameter search and scoring\\n model = GridSearchCV(estimator=model_to_tune, param_grid=param_grid,\\n cv=inner_cv, n_jobs=2)\\n model.fit(data, target)\\n test_score_not_nested.append(model.best_score_)\\n\\n # Nested CV with parameter optimization\\n test_score = cross_val_score(model, data, target, cv=outer_cv, n_jobs=2)\\n test_score_nested.append(test_score.mean())\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We can merge the data together and make a box plot of the two strategies.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\\n\\nall_scores = {\\n \\\"Not nested CV\\\": test_score_not_nested,\\n \\\"Nested CV\\\": test_score_nested,\\n}\\nall_scores = pd.DataFrame(all_scores)\",\n \"_____no_output_____\"\n ],\n [\n \"import matplotlib.pyplot as plt\\n\\ncolor = {\\\"whiskers\\\": \\\"black\\\", \\\"medians\\\": \\\"black\\\", \\\"caps\\\": \\\"black\\\"}\\nall_scores.plot.box(color=color, vert=False)\\nplt.xlabel(\\\"Accuracy\\\")\\n_ = plt.title(\\\"Comparison of mean accuracy obtained on the test sets with\\\\n\\\"\\n \\\"and without nested cross-validation\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We observe that the model's statistical performance with the nested\\ncross-validation is not as good as the non-nested cross-validation.\\n\\nAs a conclusion, when optimizing parts of the machine learning pipeline (e.g.\\nhyperparameter, transform, etc.), one needs to use nested cross-validation to\\nevaluate the statistical performance of the predictive model. 
Otherwise, the\\nresults obtained without nested cross-validation are over-optimistic.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459035,"cells":{"hexsha":{"kind":"string","value":"e7ee597f5c9e452acaa48c9cab84b3fd67d1d1d8"},"size":{"kind":"number","value":55621,"string":"55,621"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Ref/find_eps_entropy/fig1_nvar20_g05_nseq5k.ipynb"},"max_stars_repo_name":{"kind":"string","value":"danhtaihoang/e-machine"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9ff075ce1e476b8136da291b05abb34c71a4df9d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Ref/find_eps_entropy/fig1_nvar20_g05_nseq5k.ipynb"},"max_issues_repo_name":{"kind":"string","value":"danhtaihoang/e-machine"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9ff075ce1e476b8136da291b05abb34c71a4df9d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Ref/find_eps_entropy/fig1_nvar20_g05_nseq5k.ipynb"},"max_forks_repo_name":{"kind":"string","value":"danhtaihoang/e-machine"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9ff075ce1e476b8136da291b05abb34c71a4df9d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":232.7238493724,"string":"232.723849"},"max_line_length":{"kind":"number","value":50232,"string":"50,232"},"alphanum_fraction":{"kind":"number","value":0.9217921289,"string":"0.921792"},"cells":{"kind":"list like","value":[[["import matplotlib.pyplot as plt\nimport numpy as np\nimport emachine as EM\nimport itertools\nfrom joblib import Parallel, delayed\n#from sklearn.model_selection import train_test_split","_____no_output_____"],["np.random.seed(0)","_____no_output_____"],["n_var = 20 ; g = 0.5 ; n_seq = 5000","_____no_output_____"],["# Synthetic data are generated by using 
`generate_seq`.\nw_true,seqs = EM.generate_seq(n_var,n_seq,g=g)\nprint(seqs.shape)\n\nops = EM.operators(seqs)\nprint(ops.shape)","(5000, 20)\n(5000, 210)\n"],["# predict interactions w\neps_list = np.linspace(0.1,0.9,9)\nn_eps = len(eps_list)\nres = Parallel(n_jobs = n_eps)(delayed(EM.fit)(ops,eps=eps,max_iter=100) for eps in eps_list)\nw_eps = np.array([res[i][0] for i in range(len(res))])\nw_eps_iter = np.array([res[i][1] for i in range(len(res))])\n\n#e_eps = np.zeros(len(eps_list))\n#w_eps = np.zeros((len(eps_list),ops.shape[1]))\n#for i,eps in enumerate(eps_list):\n# w_eps[i,:],e_eps[i] = EM.fit(ops,w_true,eps=eps,max_iter=100)\n #print('eps and e_eps:',eps,e_eps[i])","_____no_output_____"],["w_eps_iter.shape","_____no_output_____"],["MSE = ((w_true[np.newaxis,np.newaxis,:] - w_eps_iter)**2).mean(axis=2)\nMSE.shape","_____no_output_____"],["# Entropy\n#w_iter_eps[n_eps,n_iter,n_ops]\n#ops[n_seq,n_ops] \nenergy_eps_iter = -np.sum((ops[:,np.newaxis,np.newaxis,:]*w_eps_iter[np.newaxis,:,:,:]),axis=3)\nprob_eps_iter = np.exp(energy_eps_iter) # [n_seq,n_eps,n_iter]\nprob_eps_iter /= prob_eps_iter.sum(axis=0)[np.newaxis,:,:] \nentropy_eps_iter = -(prob_eps_iter*np.log(prob_eps_iter)).sum(axis=0) #[n_eps,n_iter] ","_____no_output_____"],["entropy_eps_iter.shape","_____no_output_____"],["ieps_show = [2,4,8]\n\nnx,ny = 2,2\nfig, ax = plt.subplots(ny,nx,figsize=(nx*3.5,ny*3))\n\nfor i in ieps_show:\n ax[0,0].plot(MSE[i],label='eps=%1.1f'%eps_list[i])\n ax[1,0].plot(entropy_eps_iter[i,:],label='eps=%1.1f'%eps_list[i])\n\nax[0,1].plot(eps_list,MSE[:,-1],'ko-')\n\nax[1,1].plot(eps_list,entropy_eps_iter[:,-1],'ko-',label='final')\nax[1,1].plot(eps_list,entropy_eps_iter[:,:].max(axis=1),'r^--',label='max')\n\nax[0,0].legend()\nax[1,0].legend()\nax[1,1].legend()\n\nax[0,0].set_ylabel('MSE')\nax[0,1].set_ylabel('MSE')\nax[1,0].set_ylabel('Entropy')\nax[1,1].set_ylabel('Entropy')\n\nax[0,0].set_xlabel('Iterations')\nax[0,1].set_xlabel('epsilon')\nax[1,0].set_xlabel('Iterations')\nax[1,1].set_xlabel('epsilon')\n\nplt.tight_layout(h_pad=1, w_pad=1.5)\n#plt.savefig('fig.pdf', format='pdf', dpi=100)","_____no_output_____"]]],"string":"[\n [\n [\n \"import matplotlib.pyplot as plt\\nimport numpy as np\\nimport emachine as EM\\nimport itertools\\nfrom joblib import Parallel, delayed\\n#from sklearn.model_selection import train_test_split\",\n \"_____no_output_____\"\n ],\n [\n \"np.random.seed(0)\",\n \"_____no_output_____\"\n ],\n [\n \"n_var = 20 ; g = 0.5 ; n_seq = 5000\",\n \"_____no_output_____\"\n ],\n [\n \"# Synthetic data are generated by using `generate_seq`.\\nw_true,seqs = EM.generate_seq(n_var,n_seq,g=g)\\nprint(seqs.shape)\\n\\nops = EM.operators(seqs)\\nprint(ops.shape)\",\n \"(5000, 20)\\n(5000, 210)\\n\"\n ],\n [\n \"# predict interactions w\\neps_list = np.linspace(0.1,0.9,9)\\nn_eps = len(eps_list)\\nres = Parallel(n_jobs = n_eps)(delayed(EM.fit)(ops,eps=eps,max_iter=100) for eps in eps_list)\\nw_eps = np.array([res[i][0] for i in range(len(res))])\\nw_eps_iter = np.array([res[i][1] for i in range(len(res))])\\n\\n#e_eps = np.zeros(len(eps_list))\\n#w_eps = np.zeros((len(eps_list),ops.shape[1]))\\n#for i,eps in enumerate(eps_list):\\n# w_eps[i,:],e_eps[i] = EM.fit(ops,w_true,eps=eps,max_iter=100)\\n #print('eps and e_eps:',eps,e_eps[i])\",\n \"_____no_output_____\"\n ],\n [\n \"w_eps_iter.shape\",\n \"_____no_output_____\"\n ],\n [\n \"MSE = ((w_true[np.newaxis,np.newaxis,:] - w_eps_iter)**2).mean(axis=2)\\nMSE.shape\",\n \"_____no_output_____\"\n ],\n [\n \"# 
Entropy\\n#w_iter_eps[n_eps,n_iter,n_ops]\\n#ops[n_seq,n_ops] \\nenergy_eps_iter = -np.sum((ops[:,np.newaxis,np.newaxis,:]*w_eps_iter[np.newaxis,:,:,:]),axis=3)\\nprob_eps_iter = np.exp(energy_eps_iter) # [n_seq,n_eps,n_iter]\\nprob_eps_iter /= prob_eps_iter.sum(axis=0)[np.newaxis,:,:] \\nentropy_eps_iter = -(prob_eps_iter*np.log(prob_eps_iter)).sum(axis=0) #[n_eps,n_iter] \",\n \"_____no_output_____\"\n ],\n [\n \"entropy_eps_iter.shape\",\n \"_____no_output_____\"\n ],\n [\n \"ieps_show = [2,4,8]\\n\\nnx,ny = 2,2\\nfig, ax = plt.subplots(ny,nx,figsize=(nx*3.5,ny*3))\\n\\nfor i in ieps_show:\\n ax[0,0].plot(MSE[i],label='eps=%1.1f'%eps_list[i])\\n ax[1,0].plot(entropy_eps_iter[i,:],label='eps=%1.1f'%eps_list[i])\\n\\nax[0,1].plot(eps_list,MSE[:,-1],'ko-')\\n\\nax[1,1].plot(eps_list,entropy_eps_iter[:,-1],'ko-',label='final')\\nax[1,1].plot(eps_list,entropy_eps_iter[:,:].max(axis=1),'r^--',label='max')\\n\\nax[0,0].legend()\\nax[1,0].legend()\\nax[1,1].legend()\\n\\nax[0,0].set_ylabel('MSE')\\nax[0,1].set_ylabel('MSE')\\nax[1,0].set_ylabel('Entropy')\\nax[1,1].set_ylabel('Entropy')\\n\\nax[0,0].set_xlabel('Iterations')\\nax[0,1].set_xlabel('epsilon')\\nax[1,0].set_xlabel('Iterations')\\nax[1,1].set_xlabel('epsilon')\\n\\nplt.tight_layout(h_pad=1, w_pad=1.5)\\n#plt.savefig('fig.pdf', format='pdf', dpi=100)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459036,"cells":{"hexsha":{"kind":"string","value":"e7ee655ba4395ce9ca26c6f099abeb207c920eea"},"size":{"kind":"number","value":68951,"string":"68,951"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"LCO/Target_E/LCO_create_lightcurveB_and_movie_E.ipynb"},"max_stars_repo_name":{"kind":"string","value":"jielaizhang/pasea"},"max_stars_repo_head_hexsha":{"kind":"string","value":"08b663e27ffc8d2b119bfa6c3a0bcbe901c11b2f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"LCO/Target_E/LCO_create_lightcurveB_and_movie_E.ipynb"},"max_issues_repo_name":{"kind":"string","value":"jielaizhang/pasea"},"max_issues_repo_head_hexsha":{"kind":"string","value":"08b663e27ffc8d2b119bfa6c3a0bcbe901c11b2f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"LCO/Target_E/LCO_create_lightcurveB_and_movie_E.ipynb"},"max_forks_repo_name":{"kind":"string","value":"jielaizhang/pasea"},"max_forks_repo_head_hexsha":{"kind":"string","value":"08b663e27ffc8d2b119bfa6c3a0bcbe901c11b2f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":218.8920634921,"string":"218.892063"},"max_line_length":{"kind":"number","value":59788,"string":"59,788"},"alphanum_fraction":{"kind":"number","value":0.9127786399,"string":"0.912779"},"cells":{"kind":"list like","value":[[["import warnings\nwarnings.filterwarnings('ignore')\n\nimport glob\nimport numpy as np\n\nfrom photutils import Background2D, SExtractorBackground\nfrom photutils import DAOStarFinder\nfrom photutils import CircularAperture,aperture_photometry\nfrom photutils.utils import calc_total_error\n\n\nimport astropy.wcs as wcs\nfrom astropy.io import fits\nfrom astropy.stats import sigma_clipped_stats, SigmaClip\nfrom astropy.nddata.utils import Cutout2D\nfrom astropy import units as u\n\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import ZScaleInterval","_____no_output_____"],["mypath = '/Users/jielaizhang/Desktop/waissya/Testdata_Target_E/ASASJ030015-0459.7_20190913_B/'\noutmoviename='mymovie.gif'","_____no_output_____"],["# Load in all of the fits images in the directory and sort it\n\nimage_list = glob.glob(mypath+'*e91.fits.fz')\nimage_list.sort()","_____no_output_____"],["#Make some useful lists of values to track/record\n\nobstime = []\nBmag = []\nVmag = []\nBmag_e = []\nVmag_e = []\navg_offset = []","_____no_output_____"],["# Input the information for the calibration stars identified\n# in the previous notebook for batch processing of all of the images\n\nzpt_instrumental = 25.\n\ntar_ra = 45.064\ntar_dec = -4.995\ntar_color = 'yellow'\nref_ra = [44.93200, 45.00766, 45.11216, 45.12369]\nref_dec = [-5.03533, -4.79669, -4.91007, -4.93852]\nref_colors = ['red','cyan', 'green', 'blue']\nref_mag = [11.275, 12.093, 13.005, 14.65]","_____no_output_____"],["def do_phot_get_mag(data,hdr,err,ra,dec):\n positions = []\n zpt_instrumental = 25.\n w = wcs.WCS(hdr)\n xcoords, ycoords = w.all_world2pix(ra,dec,1)\n positions = np.transpose((xcoords, ycoords))\n apertures = CircularAperture(positions, r=24.)\n phot = aperture_photometry(data, apertures, error=err)\n\n mag = list(-2.5*np.log10(phot['aperture_sum']) + zpt_instrumental)\n dmag = list((2.5/np.log(10))*(phot['aperture_sum_err']/phot['aperture_sum']))\n \n return mag,dmag","_____no_output_____"],["def make_cutout(data,hdr,ra,dec):\n\n w = wcs.WCS(hdr)\n xcoord, ycoord = w.all_world2pix(ra,dec,1)\n position = np.transpose((xcoord, ycoord))\n size = u.Quantity([120, 120], u.pixel)\n cutout = Cutout2D(data, position, size, wcs=w, mode='strict')\n\n cutout_wcs = cutout.wcs\n header = cutout_wcs.to_header()\n hdu = fits.PrimaryHDU(data=cutout.data, header=header)\n\n return hdu","_____no_output_____"],["# Let's calculate the star's mag for *each* frame in the dataset\n\nfor frame in image_list:\n hdu = fits.open(frame)\n\n # Grab the actual science data based on above.\n sci_data = hdu[1]\n sci_hdr = sci_data.header\n time = sci_hdr['MJD-OBS']\n obstime.append(time)\n\n # Background estimation:\n sigma_clip = SigmaClip(sigma=3.) 
# Sigma clip bright obvious things to avoid biasing the background estimate\n bkg_estimator = SExtractorBackground() # Apply the SExtractor algorithm to our estimation\n bkg = Background2D(\n sci_data.data, (50, 50),\n filter_size=(3, 3),\n sigma_clip=sigma_clip,\n bkg_estimator=bkg_estimator)\n\n # Now let's subtract the background from the data\n sci_bkg = sci_data.data - bkg.background\n\n # Define an error image that will be used when calculating photometry\n effective_gain = 1.\n error = calc_total_error(sci_bkg, bkg.background_rms, effective_gain)\n\n # Calculate instrumental mags for each of the reference stars\n cal_mag,cal_dmag = do_phot_get_mag(sci_bkg,sci_hdr,error,ref_ra,ref_dec)\n\n # Calculate offsets and the standard deviation of the offset from each star.\n offsets = []\n for i in range(len(cal_mag)):\n offsets.append(ref_mag[i] - cal_mag[i])\n offset = np.mean(offsets)\n avg_offset.append(offset)\n doffset = np.std(offsets)\n \n # Do photometry on the variable target!!\n tar_mag,tar_dmag = do_phot_get_mag(sci_bkg,sci_hdr,error,tar_ra,tar_dec)\n \n cal_tar_mag = tar_mag[0]+offset\n cal_tar_dmag = np.sqrt(tar_dmag[0]**2.+doffset**2.)\n \n Bmag.append(cal_tar_mag)\n Bmag_e.append(cal_tar_dmag)\n \n # Make tiny cutouts of the variable star in each frame\n cutout_hdu = make_cutout(sci_bkg,sci_hdr,tar_ra,tar_dec)\n #cutout_hdu.writeto(frame+'_cutout.fits', overwrite=True)\n \n # Plot figures using these cutouts and output images\n interval = ZScaleInterval()\n vmin = interval.get_limits(cutout_hdu.data)[0]\n vmax = interval.get_limits(cutout_hdu.data)[1]\n\n plt.subplot(projection=wcs.WCS(cutout_hdu.header))\n plt.imshow(cutout_hdu.data, vmin=vmin, vmax=vmax, origin='lower')\n plt.xlabel('R.A.')\n plt.ylabel('Declination')\n \n pngname = str(time).replace('.','')\n plt.savefig(mypath+pngname+'.png', overwrite=True)\n \nBmag = np.array(Bmag)","_____no_output_____"],["# # Make a rudimentary lightcurve\n\nplt.figure(figsize=(10.5, 7))\nplt.errorbar(obstime,Bmag,xerr=None,yerr=Bmag_e, fmt='mo', capsize=9.0)\nplt.xlabel('MJD', fontsize=18)\nplt.ylabel('B Magnitude', fontsize=18)\nplt.show()","_____no_output_____"],["# Here we are going to use the cutouts we made above to make\n# an little movie of the variable star target changing brightness\n# over time and loop it!\n\nimport imageio\n\ncutout_list = glob.glob(mypath+'*.png')\ncutout_list.sort()\n\ncutout_frames = []\nfor file in cutout_list:\n cutout_frames.append(imageio.imread(file))\nimageio.mimsave(mypath+outmoviename, cutout_frames)","_____no_output_____"],["print(obstime)\nprint(list(Bmag))\nprint(Bmag_e)","_____no_output_____"]]],"string":"[\n [\n [\n \"import warnings\\nwarnings.filterwarnings('ignore')\\n\\nimport glob\\nimport numpy as np\\n\\nfrom photutils import Background2D, SExtractorBackground\\nfrom photutils import DAOStarFinder\\nfrom photutils import CircularAperture,aperture_photometry\\nfrom photutils.utils import calc_total_error\\n\\n\\nimport astropy.wcs as wcs\\nfrom astropy.io import fits\\nfrom astropy.stats import sigma_clipped_stats, SigmaClip\\nfrom astropy.nddata.utils import Cutout2D\\nfrom astropy import units as u\\n\\nimport matplotlib.pyplot as plt\\nfrom astropy.visualization import ZScaleInterval\",\n \"_____no_output_____\"\n ],\n [\n \"mypath = '/Users/jielaizhang/Desktop/waissya/Testdata_Target_E/ASASJ030015-0459.7_20190913_B/'\\noutmoviename='mymovie.gif'\",\n \"_____no_output_____\"\n ],\n [\n \"# Load in all of the fits images in the directory and sort it\\n\\nimage_list = 
glob.glob(mypath+'*e91.fits.fz')\\nimage_list.sort()\",\n \"_____no_output_____\"\n ],\n [\n \"#Make some useful lists of values to track/record\\n\\nobstime = []\\nBmag = []\\nVmag = []\\nBmag_e = []\\nVmag_e = []\\navg_offset = []\",\n \"_____no_output_____\"\n ],\n [\n \"# Input the information for the calibration stars identified\\n# in the previous notebook for batch processing of all of the images\\n\\nzpt_instrumental = 25.\\n\\ntar_ra = 45.064\\ntar_dec = -4.995\\ntar_color = 'yellow'\\nref_ra = [44.93200, 45.00766, 45.11216, 45.12369]\\nref_dec = [-5.03533, -4.79669, -4.91007, -4.93852]\\nref_colors = ['red','cyan', 'green', 'blue']\\nref_mag = [11.275, 12.093, 13.005, 14.65]\",\n \"_____no_output_____\"\n ],\n [\n \"def do_phot_get_mag(data,hdr,err,ra,dec):\\n positions = []\\n zpt_instrumental = 25.\\n w = wcs.WCS(hdr)\\n xcoords, ycoords = w.all_world2pix(ra,dec,1)\\n positions = np.transpose((xcoords, ycoords))\\n apertures = CircularAperture(positions, r=24.)\\n phot = aperture_photometry(data, apertures, error=err)\\n\\n mag = list(-2.5*np.log10(phot['aperture_sum']) + zpt_instrumental)\\n dmag = list((2.5/np.log(10))*(phot['aperture_sum_err']/phot['aperture_sum']))\\n \\n return mag,dmag\",\n \"_____no_output_____\"\n ],\n [\n \"def make_cutout(data,hdr,ra,dec):\\n\\n w = wcs.WCS(hdr)\\n xcoord, ycoord = w.all_world2pix(ra,dec,1)\\n position = np.transpose((xcoord, ycoord))\\n size = u.Quantity([120, 120], u.pixel)\\n cutout = Cutout2D(data, position, size, wcs=w, mode='strict')\\n\\n cutout_wcs = cutout.wcs\\n header = cutout_wcs.to_header()\\n hdu = fits.PrimaryHDU(data=cutout.data, header=header)\\n\\n return hdu\",\n \"_____no_output_____\"\n ],\n [\n \"# Let's calculate the star's mag for *each* frame in the dataset\\n\\nfor frame in image_list:\\n hdu = fits.open(frame)\\n\\n # Grab the actual science data based on above.\\n sci_data = hdu[1]\\n sci_hdr = sci_data.header\\n time = sci_hdr['MJD-OBS']\\n obstime.append(time)\\n\\n # Background estimation:\\n sigma_clip = SigmaClip(sigma=3.) 
# Sigma clip bright obvious things to avoid biasing the background estimate\\n bkg_estimator = SExtractorBackground() # Apply the SExtractor algorithm to our estimation\\n bkg = Background2D(\\n sci_data.data, (50, 50),\\n filter_size=(3, 3),\\n sigma_clip=sigma_clip,\\n bkg_estimator=bkg_estimator)\\n\\n # Now let's subtract the background from the data\\n sci_bkg = sci_data.data - bkg.background\\n\\n # Define an error image that will be used when calculating photometry\\n effective_gain = 1.\\n error = calc_total_error(sci_bkg, bkg.background_rms, effective_gain)\\n\\n # Calculate instrumental mags for each of the reference stars\\n cal_mag,cal_dmag = do_phot_get_mag(sci_bkg,sci_hdr,error,ref_ra,ref_dec)\\n\\n # Calculate offsets and the standard deviation of the offset from each star.\\n offsets = []\\n for i in range(len(cal_mag)):\\n offsets.append(ref_mag[i] - cal_mag[i])\\n offset = np.mean(offsets)\\n avg_offset.append(offset)\\n doffset = np.std(offsets)\\n \\n # Do photometry on the variable target!!\\n tar_mag,tar_dmag = do_phot_get_mag(sci_bkg,sci_hdr,error,tar_ra,tar_dec)\\n \\n cal_tar_mag = tar_mag[0]+offset\\n cal_tar_dmag = np.sqrt(tar_dmag[0]**2.+doffset**2.)\\n \\n Bmag.append(cal_tar_mag)\\n Bmag_e.append(cal_tar_dmag)\\n \\n # Make tiny cutouts of the variable star in each frame\\n cutout_hdu = make_cutout(sci_bkg,sci_hdr,tar_ra,tar_dec)\\n #cutout_hdu.writeto(frame+'_cutout.fits', overwrite=True)\\n \\n # Plot figures using these cutouts and output images\\n interval = ZScaleInterval()\\n vmin = interval.get_limits(cutout_hdu.data)[0]\\n vmax = interval.get_limits(cutout_hdu.data)[1]\\n\\n plt.subplot(projection=wcs.WCS(cutout_hdu.header))\\n plt.imshow(cutout_hdu.data, vmin=vmin, vmax=vmax, origin='lower')\\n plt.xlabel('R.A.')\\n plt.ylabel('Declination')\\n \\n pngname = str(time).replace('.','')\\n plt.savefig(mypath+pngname+'.png', overwrite=True)\\n \\nBmag = np.array(Bmag)\",\n \"_____no_output_____\"\n ],\n [\n \"# # Make a rudimentary lightcurve\\n\\nplt.figure(figsize=(10.5, 7))\\nplt.errorbar(obstime,Bmag,xerr=None,yerr=Bmag_e, fmt='mo', capsize=9.0)\\nplt.xlabel('MJD', fontsize=18)\\nplt.ylabel('B Magnitude', fontsize=18)\\nplt.show()\",\n \"_____no_output_____\"\n ],\n [\n \"# Here we are going to use the cutouts we made above to make\\n# an little movie of the variable star target changing brightness\\n# over time and loop it!\\n\\nimport imageio\\n\\ncutout_list = glob.glob(mypath+'*.png')\\ncutout_list.sort()\\n\\ncutout_frames = []\\nfor file in cutout_list:\\n cutout_frames.append(imageio.imread(file))\\nimageio.mimsave(mypath+outmoviename, cutout_frames)\",\n \"_____no_output_____\"\n ],\n [\n \"print(obstime)\\nprint(list(Bmag))\\nprint(Bmag_e)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459037,"cells":{"hexsha":{"kind":"string","value":"e7ee6a0421ef209065fff0006f30a659cd4c070d"},"size":{"kind":"number","value":15060,"string":"15,060"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter 
Notebook"},"max_stars_repo_path":{"kind":"string","value":"aata/section-linear-codes.ipynb"},"max_stars_repo_name":{"kind":"string","value":"johnperry-math/cocalc-examples"},"max_stars_repo_head_hexsha":{"kind":"string","value":"394479e972dc2b74211113bbb43bc1ec4ec9978c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0","CC-BY-4.0"],"string":"[\n \"Apache-2.0\",\n \"CC-BY-4.0\"\n]"},"max_stars_count":{"kind":"number","value":13,"string":"13"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-09-06T23:04:59.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-04-05T11:08:51.000Z"},"max_issues_repo_path":{"kind":"string","value":"aata/section-linear-codes.ipynb"},"max_issues_repo_name":{"kind":"string","value":"johnperry-math/cocalc-examples"},"max_issues_repo_head_hexsha":{"kind":"string","value":"394479e972dc2b74211113bbb43bc1ec4ec9978c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0","CC-BY-4.0"],"string":"[\n \"Apache-2.0\",\n \"CC-BY-4.0\"\n]"},"max_issues_count":{"kind":"number","value":9,"string":"9"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-02-01T15:58:28.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-07-14T15:18:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"aata/section-linear-codes.ipynb"},"max_forks_repo_name":{"kind":"string","value":"johnperry-math/cocalc-examples"},"max_forks_repo_head_hexsha":{"kind":"string","value":"394479e972dc2b74211113bbb43bc1ec4ec9978c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0","CC-BY-4.0"],"string":"[\n \"Apache-2.0\",\n \"CC-BY-4.0\"\n]"},"max_forks_count":{"kind":"number","value":10,"string":"10"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2017-10-26T17:30:03.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-12-11T07:25:28.000Z"},"avg_line_length":{"kind":"number","value":627.5,"string":"627.5"},"max_line_length":{"kind":"number","value":1402,"string":"1,402"},"alphanum_fraction":{"kind":"number","value":0.6436918991,"string":"0.643692"},"cells":{"kind":"list like","value":[[["%%html\n\n\n\n\n\n\n","_____no_output_____"]],[["**Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the \"Run\" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.","_____no_output_____"],["$\\newcommand{\\identity}{\\mathrm{id}}\n\\newcommand{\\notdivide}{\\nmid}\n\\newcommand{\\notsubset}{\\not\\subset}\n\\newcommand{\\lcm}{\\operatorname{lcm}}\n\\newcommand{\\gf}{\\operatorname{GF}}\n\\newcommand{\\inn}{\\operatorname{Inn}}\n\\newcommand{\\aut}{\\operatorname{Aut}}\n\\newcommand{\\Hom}{\\operatorname{Hom}}\n\\newcommand{\\cis}{\\operatorname{cis}}\n\\newcommand{\\chr}{\\operatorname{char}}\n\\newcommand{\\Null}{\\operatorname{Null}}\n\\newcommand{\\lt}{<}\n\\newcommand{\\gt}{>}\n\\newcommand{\\amp}{&}\n$","_____no_output_____"],["

Section 8.2 Linear Codes

","_____no_output_____"],["
","_____no_output_____"],["

To gain more knowledge of a particular code and develop more efficient techniques of encoding, decoding, and error detection, we need to add additional structure to our codes. One way to accomplish this is to require that the code also be a group. A group code is a code that is also a subgroup of ${\\mathbb Z}_2^n\\text{.}$

","_____no_output_____"],["

To check that a code is a group code, we need only verify one thing. If we add any two elements in the code, the result must be an $n$-tuple that is again in the code. It is not necessary to check that the inverse of the $n$-tuple is in the code, since every codeword is its own inverse, nor is it necessary to check that ${\\mathbf 0}$ is a codeword. For instance,

\n\\begin{equation*}\n(11000101) + (11000101) = (00000000).\n\\end{equation*}\n
","_____no_output_____"],["
Example 8.16

Suppose that we have a code that consists of the following 7-tuples:

\n\\begin{align*}\n&(0000000) & & (0001111) & & (0010101) & & (0011010)\\\\\n&(0100110) & & (0101001) & & (0110011) & & (0111100)\\\\\n&(1000011) & & (1001100) & & (1010110) & & (1011001)\\\\\n&(1100101) & & (1101010) & & (1110000) & & (1111111).\n\\end{align*}\n

It is a straightforward though tedious task to verify that this code is also a subgroup of ${\\mathbb Z}_2^7$ and, therefore, a group code. This code is a single error-detecting and single error-correcting code, but it is a long and tedious process to compute all of the distances between pairs of codewords to determine that $d_{\\min} = 3\\text{.}$ It is much easier to see that the minimum weight of all the nonzero codewords is 3. As we will soon see, this is no coincidence. However, the relationship between weights and distances in a particular code is heavily dependent on the fact that the code is a group.
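
(Added illustrative sketch, not part of the original text: the closure property claimed here can be spot-checked directly in Python; the `codewords` list below simply transcribes the sixteen 7-tuples above.)

```python
import itertools

# The sixteen codewords of Example 8.16, transcribed as bit strings.
codewords = [
    "0000000", "0001111", "0010101", "0011010",
    "0100110", "0101001", "0110011", "0111100",
    "1000011", "1001100", "1010110", "1011001",
    "1100101", "1101010", "1110000", "1111111",
]

def add_mod2(a, b):
    # Componentwise sum in Z_2 of two bit strings of equal length.
    return "".join(str((int(x) + int(y)) % 2) for x, y in zip(a, b))

# Group-code check: the sum of any two codewords is again a codeword.
closed = all(add_mod2(a, b) in set(codewords)
             for a, b in itertools.product(codewords, repeat=2))
print(closed)  # the text asserts this is True
```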

","_____no_output_____"],["
Lemma 8.17

Let ${\\mathbf x}$ and ${\\mathbf y}$ be binary $n$-tuples. Then $w({\\mathbf x} + {\\mathbf y}) = d({\\mathbf x}, {\\mathbf y})\\text{.}$

Proof

Suppose that ${\\mathbf x}$ and ${\\mathbf y}$ are binary $n$-tuples. Then the distance between ${\\mathbf x}$ and ${\\mathbf y}$ is exactly the number of places in which ${\\mathbf x}$ and ${\\mathbf y}$ differ. But ${\\mathbf x}$ and ${\\mathbf y}$ differ in a particular coordinate exactly when the sum in the coordinate is 1, since

\n\\begin{align*}\n1 + 1 & = 0\\\\\n0 + 0 & = 0\\\\\n1 + 0 & = 1\\\\\n0 + 1 & = 1.\n\\end{align*}\n

Consequently, the weight of the sum must be the distance between the two codewords.
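
(A small added illustration of the lemma; the two 8-tuples are arbitrary choices, not taken from the text.)

```python
x = (1, 1, 0, 1, 0, 0, 1, 0)
y = (1, 1, 0, 0, 0, 1, 1, 0)

s = tuple((a + b) % 2 for a, b in zip(x, y))    # x + y in Z_2^8
weight = sum(s)                                 # w(x + y)
distance = sum(a != b for a, b in zip(x, y))    # d(x, y)
print(weight, distance)  # the two numbers agree, as the lemma states
```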

","_____no_output_____"],["
Theorem 8.18

Let $d_{\\min}$ be the minimum distance for a group code $C\\text{.}$ Then $d_{\\min}$ is the minimum of all the nonzero weights of the nonzero codewords in $C\\text{.}$ That is,

\n\\begin{equation*}\nd_{\\min} = \\min\\{ w({\\mathbf x}) : { {\\mathbf x} \\neq {\\mathbf 0} } \\}.\n\\end{equation*}\n
Proof

Observe that

\n\\begin{align*}\nd_{\\min} & = \\min \\{ d({\\mathbf x},{\\mathbf y}) : {\\mathbf x}\\neq{\\mathbf y} \\}\\\\\n&= \\min \\{ d({\\mathbf x},{\\mathbf y}) : {\\mathbf x}+{\\mathbf y} \\neq {\\mathbf 0} \\}\\\\\n&= \\min\\{ w({\\mathbf x} + {\\mathbf y}) : {\\mathbf x}+{\\mathbf y}\\neq {\\mathbf 0} \\}\\\\\n& = \\min\\{ w({\\mathbf z}) : {\\mathbf z} \\neq {\\mathbf 0} \\}.\n\\end{align*}\n
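
(Added sketch: for the code of Example 8.16 the theorem can be verified numerically; this reuses the `codewords` list from the sketch under Example 8.16.)

```python
import itertools

def dist(a, b):
    # Hamming distance between two bit strings of equal length.
    return sum(x != y for x, y in zip(a, b))

d_min = min(dist(a, b) for a, b in itertools.combinations(codewords, 2))
w_min = min(c.count("1") for c in codewords if c != "0000000")
print(d_min, w_min)  # both equal 3 for that code, as claimed in the text
```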
","_____no_output_____"],["

Subsection Linear Codes

","_____no_output_____"],["

From Example 8.16, it is now easy to check that the minimum nonzero weight is 3; hence, the code does indeed detect and correct all single errors. We have now reduced the problem of finding “good” codes to that of generating group codes. One easy way to generate group codes is to employ a bit of matrix theory.

","_____no_output_____"],["

Define the inner product of two binary $n$-tuples to be

\n\\begin{equation*}\n{\\mathbf x} \\cdot {\\mathbf y} = x_1 y_1 + \\cdots + x_n y_n,\n\\end{equation*}\n

where ${\\mathbf x} = (x_1, x_2, \\ldots, x_n)^{\\rm t}$ and ${\\mathbf y} = (y_1, y_2, \\ldots, y_n)^{\\rm t}$ are column vectors. 4 Since we will be working with matrices, we will write binary $n$-tuples as column vectors for the remainder of this chapter. For example, if ${\\mathbf x} = (011001)^{\\rm t}$ and ${\\mathbf y} = (110101)^{\\rm t}\\text{,}$ then ${\\mathbf x} \\cdot {\\mathbf y} = 0\\text{.}$ We can also look at an inner product as the product of a row matrix with a column matrix; that is,

\n\\begin{align*}\n{\\mathbf x} \\cdot {\\mathbf y} & = {\\mathbf x}^{\\rm t} {\\mathbf y}\\\\\n& =\n\\begin{pmatrix}\nx_1 & x_2 & \\cdots & x_n\n\\end{pmatrix}\n\\begin{pmatrix}\ny_1 \\\\ y_2 \\\\ \\vdots \\\\ y_n\n\\end{pmatrix}\\\\\n& = x_{1}y_{1} + x_{2}y_{2} + \\cdots + x_{n}y_{n}.\n\\end{align*}\n
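
(Added illustration: over ${\mathbb Z}_2$ the inner product is an ordinary dot product reduced mod 2; this reproduces the worked values ${\mathbf x} = (011001)^{\rm t}$, ${\mathbf y} = (110101)^{\rm t}$, assuming NumPy.)

```python
import numpy as np

x = np.array([0, 1, 1, 0, 0, 1])
y = np.array([1, 1, 0, 1, 0, 1])
print(int(x @ y) % 2)  # 0, matching the example in the text
```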
","_____no_output_____"],["
Example 8.19

Suppose that the words to be encoded consist of all binary 3-tuples and that our encoding scheme is even-parity. To encode an arbitrary 3-tuple, we add a fourth bit to obtain an even number of 1s. Notice that an arbitrary $n$-tuple ${\\mathbf x} = (x_1, x_2, \\ldots, x_n)^{\\rm t}$ has an even number of 1s exactly when $x_1 + x_2 + \\cdots + x_n = 0\\text{;}$ hence, a 4-tuple ${\\mathbf x} = (x_1, x_2, x_3, x_4)^{\\rm t}$ has an even number of 1s if $x_1+ x_2+ x_3+ x_4 = 0\\text{,}$ or

\n\\begin{equation*}\n{\\mathbf x} \\cdot {\\mathbf 1} = {\\mathbf x}^{\\rm t} {\\mathbf 1} =\n\\begin{pmatrix}\nx_1 & x_2 & x_3 & x_4\n\\end{pmatrix}\n\\begin{pmatrix}\n1 \\\\ 1 \\\\ 1 \\\\ 1\n\\end{pmatrix} = 0.\n\\end{equation*}\n

This example leads us to hope that there is a connection between matrices and coding theory.
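
(Added sketch of the even-parity encoding described above; the helper name is an invention for illustration only.)

```python
def even_parity_encode(bits3):
    # Append a fourth bit so the encoded 4-tuple has an even number of 1s.
    return tuple(bits3) + (sum(bits3) % 2,)

print(even_parity_encode((0, 1, 1)))  # (0, 1, 1, 0)
print(even_parity_encode((1, 0, 0)))  # (1, 0, 0, 1)
```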

","_____no_output_____"],["

Let ${\\mathbb M}_{m \\times n}({\\mathbb Z}_2)$ denote the set of all $m \\times n$ matrices with entries in ${\\mathbb Z}_2\\text{.}$ We do matrix operations as usual except that all our addition and multiplication operations occur in ${\\mathbb Z}_2\\text{.}$ Define the null space of a matrix $H \\in {\\mathbb M}_{m \\times n}({\\mathbb Z}_2)$ to be the set of all binary $n$-tuples ${\\mathbf x}$ such that $H{\\mathbf x} = {\\mathbf 0}\\text{.}$ We denote the null space of a matrix $H$ by $\\Null(H)\\text{.}$

","_____no_output_____"],["
Example 8.20

Suppose that

\n\\begin{equation*}\nH =\n\\begin{pmatrix}\n0 & 1 & 0 & 1 & 0 \\\\\n1 & 1 & 1 & 1 & 0 \\\\\n0 & 0 & 1 & 1 & 1\n\\end{pmatrix}.\n\\end{equation*}\n

For a 5-tuple ${\\mathbf x} = (x_1, x_2, x_3, x_4, x_5)^{\\rm t}$ to be in the null space of $H\\text{,}$ $H{\\mathbf x} = {\\mathbf 0}\\text{.}$ Equivalently, the following system of equations must be satisfied:

\n\\begin{align*}\nx_2 + x_4 & = 0\\\\\nx_1 + x_2 + x_3 + x_4 & = 0\\\\\nx_3 + x_4 + x_5 & = 0.\n\\end{align*}\n

The set of binary 5-tuples satisfying these equations is

\n\\begin{equation*}\n(00000) \\qquad (11110) \\qquad (10101) \\qquad (01011).\n\\end{equation*}\n

This code is easily determined to be a group code.
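
(Added sketch: the four codewords listed above can be recovered by brute force over all 32 binary 5-tuples, assuming NumPy.)

```python
import itertools
import numpy as np

H = np.array([[0, 1, 0, 1, 0],
              [1, 1, 1, 1, 0],
              [0, 0, 1, 1, 1]])

# Keep the 5-tuples x with Hx = 0 over Z_2.
null_space = [x for x in itertools.product((0, 1), repeat=5)
              if not np.any((H @ np.array(x)) % 2)]
print(null_space)
# [(0,0,0,0,0), (0,1,0,1,1), (1,0,1,0,1), (1,1,1,1,0)] -- the same four 5-tuples
```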

","_____no_output_____"],["
Theorem 8.21

Let $H$ be in ${\\mathbb M}_{m \\times n}({\\mathbb Z}_2)\\text{.}$ Then the null space of $H$ is a group code.

Proof

Since each element of ${\\mathbb Z}_2^n$ is its own inverse, the only thing that really needs to be checked here is closure. Let ${\\mathbf x}, {\\mathbf y} \\in {\\rm Null}(H)$ for some matrix $H$ in ${\\mathbb M}_{m \\times n}({\\mathbb Z}_2)\\text{.}$ Then $H{\\mathbf x} = {\\mathbf 0}$ and $H{\\mathbf y} = {\\mathbf 0}\\text{.}$ So

\n\\begin{equation*}\nH({\\mathbf x}+{\\mathbf y}) = H{\\mathbf x} + H{\\mathbf y} = {\\mathbf 0} + {\\mathbf 0} = {\\mathbf 0}.\n\\end{equation*}\n

Hence, ${\\mathbf x} + {\\mathbf y}$ is in the null space of $H$ and therefore must be a codeword.

","_____no_output_____"],["

A code is a linear code if it is determined by the null space of some matrix $H \\in {\\mathbb M}_{m \\times n}({\\mathbb Z}_2)\\text{.}$

","_____no_output_____"],["
Example 8.22

Let $C$ be the code given by the matrix

\n\\begin{equation*}\nH =\n\\begin{pmatrix}\n0 & 0 & 0 & 1 & 1 & 1 \\\\\n0 & 1 & 1 & 0 & 1 & 1 \\\\\n1 & 0 & 1 & 0 & 0 & 1\n\\end{pmatrix}.\n\\end{equation*}\n

Suppose that the 6-tuple ${\\mathbf x} = (010011)^{\\rm t}$ is received. It is a simple matter of matrix multiplication to determine whether or not ${\\mathbf x}$ is a codeword. Since

\n\\begin{equation*}\nH{\\mathbf x} =\n\\begin{pmatrix} \n0 \\\\ 1 \\\\ 1\n\\end{pmatrix},\n\\end{equation*}\n

the received word is not a codeword. We must either attempt to correct the word or request that it be transmitted again.
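
(Added sketch reproducing the check in this example with NumPy; the product $H{\mathbf x}$ is computed mod 2.)

```python
import numpy as np

H = np.array([[0, 0, 0, 1, 1, 1],
              [0, 1, 1, 0, 1, 1],
              [1, 0, 1, 0, 0, 1]])
x = np.array([0, 1, 0, 0, 1, 1])

Hx = (H @ x) % 2
print(Hx)  # [0 1 1], nonzero, so x is not a codeword
```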

","_____no_output_____"]]],"string":"[\n [\n [\n \"%%html\\n\\n\\n\\n\\n\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the \\\"Run\\\" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.\",\n \"_____no_output_____\"\n ],\n [\n \"$\\\\newcommand{\\\\identity}{\\\\mathrm{id}}\\n\\\\newcommand{\\\\notdivide}{\\\\nmid}\\n\\\\newcommand{\\\\notsubset}{\\\\not\\\\subset}\\n\\\\newcommand{\\\\lcm}{\\\\operatorname{lcm}}\\n\\\\newcommand{\\\\gf}{\\\\operatorname{GF}}\\n\\\\newcommand{\\\\inn}{\\\\operatorname{Inn}}\\n\\\\newcommand{\\\\aut}{\\\\operatorname{Aut}}\\n\\\\newcommand{\\\\Hom}{\\\\operatorname{Hom}}\\n\\\\newcommand{\\\\cis}{\\\\operatorname{cis}}\\n\\\\newcommand{\\\\chr}{\\\\operatorname{char}}\\n\\\\newcommand{\\\\Null}{\\\\operatorname{Null}}\\n\\\\newcommand{\\\\lt}{<}\\n\\\\newcommand{\\\\gt}{>}\\n\\\\newcommand{\\\\amp}{&}\\n$\",\n \"_____no_output_____\"\n ],\n [\n \"

Section 8.2 Linear Codes

\",\n \"_____no_output_____\"\n ],\n [\n \"
\",\n \"_____no_output_____\"\n ],\n [\n \"

To gain more knowledge of a particular code and develop more efficient techniques of encoding, decoding, and error detection, we need to add additional structure to our codes. One way to accomplish this is to require that the code also be a group. A group code is a code that is also a subgroup of ${\\\\mathbb Z}_2^n\\\\text{.}$

\",\n \"_____no_output_____\"\n ],\n [\n \"

To check that a code is a group code, we need only verify one thing. If we add any two elements in the code, the result must be an $n$-tuple that is again in the code. It is not necessary to check that the inverse of the $n$-tuple is in the code, since every codeword is its own inverse, nor is it necessary to check that ${\\\\mathbf 0}$ is a codeword. For instance,

\\n\\\\begin{equation*}\\n(11000101) + (11000101) = (00000000).\\n\\\\end{equation*}\\n
\",\n \"_____no_output_____\"\n ],\n [\n \"
Example 8.16

Suppose that we have a code that consists of the following 7-tuples:

\\n\\\\begin{align*}\\n&(0000000) & & (0001111) & & (0010101) & & (0011010)\\\\\\\\\\n&(0100110) & & (0101001) & & (0110011) & & (0111100)\\\\\\\\\\n&(1000011) & & (1001100) & & (1010110) & & (1011001)\\\\\\\\\\n&(1100101) & & (1101010) & & (1110000) & & (1111111).\\n\\\\end{align*}\\n

It is a straightforward though tedious task to verify that this code is also a subgroup of ${\\\\mathbb Z}_2^7$ and, therefore, a group code. This code is a single error-detecting and single error-correcting code, but it is a long and tedious process to compute all of the distances between pairs of codewords to determine that $d_{\\\\min} = 3\\\\text{.}$ It is much easier to see that the minimum weight of all the nonzero codewords is 3. As we will soon see, this is no coincidence. However, the relationship between weights and distances in a particular code is heavily dependent on the fact that the code is a group.

\",\n \"_____no_output_____\"\n ],\n [\n \"
Lemma 8.17

Let ${\\\\mathbf x}$ and ${\\\\mathbf y}$ be binary $n$-tuples. Then $w({\\\\mathbf x} + {\\\\mathbf y}) = d({\\\\mathbf x}, {\\\\mathbf y})\\\\text{.}$

Proof

Suppose that ${\\\\mathbf x}$ and ${\\\\mathbf y}$ are binary $n$-tuples. Then the distance between ${\\\\mathbf x}$ and ${\\\\mathbf y}$ is exactly the number of places in which ${\\\\mathbf x}$ and ${\\\\mathbf y}$ differ. But ${\\\\mathbf x}$ and ${\\\\mathbf y}$ differ in a particular coordinate exactly when the sum in the coordinate is 1, since

\\n\\\\begin{align*}\\n1 + 1 & = 0\\\\\\\\\\n0 + 0 & = 0\\\\\\\\\\n1 + 0 & = 1\\\\\\\\\\n0 + 1 & = 1.\\n\\\\end{align*}\\n

Consequently, the weight of the sum must be the distance between the two codewords.

\",\n \"_____no_output_____\"\n ],\n [\n \"
Theorem 8.18

Let $d_{\\\\min}$ be the minimum distance for a group code $C\\\\text{.}$ Then $d_{\\\\min}$ is the minimum of all the nonzero weights of the nonzero codewords in $C\\\\text{.}$ That is,

\\n\\\\begin{equation*}\\nd_{\\\\min} = \\\\min\\\\{ w({\\\\mathbf x}) : { {\\\\mathbf x} \\\\neq {\\\\mathbf 0} } \\\\}.\\n\\\\end{equation*}\\n
Proof

Observe that

\\n\\\\begin{align*}\\nd_{\\\\min} & = \\\\min \\\\{ d({\\\\mathbf x},{\\\\mathbf y}) : {\\\\mathbf x}\\\\neq{\\\\mathbf y} \\\\}\\\\\\\\\\n&= \\\\min \\\\{ d({\\\\mathbf x},{\\\\mathbf y}) : {\\\\mathbf x}+{\\\\mathbf y} \\\\neq {\\\\mathbf 0} \\\\}\\\\\\\\\\n&= \\\\min\\\\{ w({\\\\mathbf x} + {\\\\mathbf y}) : {\\\\mathbf x}+{\\\\mathbf y}\\\\neq {\\\\mathbf 0} \\\\}\\\\\\\\\\n& = \\\\min\\\\{ w({\\\\mathbf z}) : {\\\\mathbf z} \\\\neq {\\\\mathbf 0} \\\\}.\\n\\\\end{align*}\\n
\",\n \"_____no_output_____\"\n ],\n [\n \"

Subsection Linear Codes

\",\n \"_____no_output_____\"\n ],\n [\n \"

From Example 8.16, it is now easy to check that the minimum nonzero weight is 3; hence, the code does indeed detect and correct all single errors. We have now reduced the problem of finding “good” codes to that of generating group codes. One easy way to generate group codes is to employ a bit of matrix theory.

\",\n \"_____no_output_____\"\n ],\n [\n \"

Define the inner product of two binary $n$-tuples to be

\\n\\\\begin{equation*}\\n{\\\\mathbf x} \\\\cdot {\\\\mathbf y} = x_1 y_1 + \\\\cdots + x_n y_n,\\n\\\\end{equation*}\\n

where ${\\\\mathbf x} = (x_1, x_2, \\\\ldots, x_n)^{\\\\rm t}$ and ${\\\\mathbf y} = (y_1, y_2, \\\\ldots, y_n)^{\\\\rm t}$ are column vectors. 4 Since we will be working with matrices, we will write binary $n$-tuples as column vectors for the remainder of this chapter. For example, if ${\\\\mathbf x} = (011001)^{\\\\rm t}$ and ${\\\\mathbf y} = (110101)^{\\\\rm t}\\\\text{,}$ then ${\\\\mathbf x} \\\\cdot {\\\\mathbf y} = 0\\\\text{.}$ We can also look at an inner product as the product of a row matrix with a column matrix; that is,

\\n\\\\begin{align*}\\n{\\\\mathbf x} \\\\cdot {\\\\mathbf y} & = {\\\\mathbf x}^{\\\\rm t} {\\\\mathbf y}\\\\\\\\\\n& =\\n\\\\begin{pmatrix}\\nx_1 & x_2 & \\\\cdots & x_n\\n\\\\end{pmatrix}\\n\\\\begin{pmatrix}\\ny_1 \\\\\\\\ y_2 \\\\\\\\ \\\\vdots \\\\\\\\ y_n\\n\\\\end{pmatrix}\\\\\\\\\\n& = x_{1}y_{1} + x_{2}y_{2} + \\\\cdots + x_{n}y_{n}.\\n\\\\end{align*}\\n
\",\n \"_____no_output_____\"\n ],\n [\n \"
Example 8.19

Suppose that the words to be encoded consist of all binary 3-tuples and that our encoding scheme is even-parity. To encode an arbitrary 3-tuple, we add a fourth bit to obtain an even number of 1s. Notice that an arbitrary $n$-tuple ${\\\\mathbf x} = (x_1, x_2, \\\\ldots, x_n)^{\\\\rm t}$ has an even number of 1s exactly when $x_1 + x_2 + \\\\cdots + x_n = 0\\\\text{;}$ hence, a 4-tuple ${\\\\mathbf x} = (x_1, x_2, x_3, x_4)^{\\\\rm t}$ has an even number of 1s if $x_1+ x_2+ x_3+ x_4 = 0\\\\text{,}$ or

\\n\\\\begin{equation*}\\n{\\\\mathbf x} \\\\cdot {\\\\mathbf 1} = {\\\\mathbf x}^{\\\\rm t} {\\\\mathbf 1} =\\n\\\\begin{pmatrix}\\nx_1 & x_2 & x_3 & x_4\\n\\\\end{pmatrix}\\n\\\\begin{pmatrix}\\n1 \\\\\\\\ 1 \\\\\\\\ 1 \\\\\\\\ 1\\n\\\\end{pmatrix} = 0.\\n\\\\end{equation*}\\n

This example leads us to hope that there is a connection between matrices and coding theory.

\",\n \"_____no_output_____\"\n ],\n [\n \"

Let ${\\\\mathbb M}_{m \\\\times n}({\\\\mathbb Z}_2)$ denote the set of all $m \\\\times n$ matrices with entries in ${\\\\mathbb Z}_2\\\\text{.}$ We do matrix operations as usual except that all our addition and multiplication operations occur in ${\\\\mathbb Z}_2\\\\text{.}$ Define the null space of a matrix $H \\\\in {\\\\mathbb M}_{m \\\\times n}({\\\\mathbb Z}_2)$ to be the set of all binary $n$-tuples ${\\\\mathbf x}$ such that $H{\\\\mathbf x} = {\\\\mathbf 0}\\\\text{.}$ We denote the null space of a matrix $H$ by $\\\\Null(H)\\\\text{.}$

\",\n \"_____no_output_____\"\n ],\n [\n \"
Example 8.20

Suppose that

\\n\\\\begin{equation*}\\nH =\\n\\\\begin{pmatrix}\\n0 & 1 & 0 & 1 & 0 \\\\\\\\\\n1 & 1 & 1 & 1 & 0 \\\\\\\\\\n0 & 0 & 1 & 1 & 1\\n\\\\end{pmatrix}.\\n\\\\end{equation*}\\n

For a 5-tuple ${\\\\mathbf x} = (x_1, x_2, x_3, x_4, x_5)^{\\\\rm t}$ to be in the null space of $H\\\\text{,}$ $H{\\\\mathbf x} = {\\\\mathbf 0}\\\\text{.}$ Equivalently, the following system of equations must be satisfied:

\\n\\\\begin{align*}\\nx_2 + x_4 & = 0\\\\\\\\\\nx_1 + x_2 + x_3 + x_4 & = 0\\\\\\\\\\nx_3 + x_4 + x_5 & = 0.\\n\\\\end{align*}\\n

The set of binary 5-tuples satisfying these equations is

\\n\\\\begin{equation*}\\n(00000) \\\\qquad (11110) \\\\qquad (10101) \\\\qquad (01011).\\n\\\\end{equation*}\\n

This code is easily determined to be a group code.

\",\n \"_____no_output_____\"\n ],\n [\n \"
Theorem 8.21

Let $H$ be in ${\\\\mathbb M}_{m \\\\times n}({\\\\mathbb Z}_2)\\\\text{.}$ Then the null space of $H$ is a group code.

Proof

Since each element of ${\\\\mathbb Z}_2^n$ is its own inverse, the only thing that really needs to be checked here is closure. Let ${\\\\mathbf x}, {\\\\mathbf y} \\\\in {\\\\rm Null}(H)$ for some matrix $H$ in ${\\\\mathbb M}_{m \\\\times n}({\\\\mathbb Z}_2)\\\\text{.}$ Then $H{\\\\mathbf x} = {\\\\mathbf 0}$ and $H{\\\\mathbf y} = {\\\\mathbf 0}\\\\text{.}$ So

\\n\\\\begin{equation*}\\nH({\\\\mathbf x}+{\\\\mathbf y}) = H{\\\\mathbf x} + H{\\\\mathbf y} = {\\\\mathbf 0} + {\\\\mathbf 0} = {\\\\mathbf 0}.\\n\\\\end{equation*}\\n

Hence, ${\\\\mathbf x} + {\\\\mathbf y}$ is in the null space of $H$ and therefore must be a codeword.

\",\n \"_____no_output_____\"\n ],\n [\n \"

A code is a linear code if it is determined by the null space of some matrix $H \\\\in {\\\\mathbb M}_{m \\\\times n}({\\\\mathbb Z}_2)\\\\text{.}$

\",\n \"_____no_output_____\"\n ],\n [\n \"
Example 8.22

Let $C$ be the code given by the matrix

\\n\\\\begin{equation*}\\nH =\\n\\\\begin{pmatrix}\\n0 & 0 & 0 & 1 & 1 & 1 \\\\\\\\\\n0 & 1 & 1 & 0 & 1 & 1 \\\\\\\\\\n1 & 0 & 1 & 0 & 0 & 1\\n\\\\end{pmatrix}.\\n\\\\end{equation*}\\n

Suppose that the 6-tuple ${\\\\mathbf x} = (010011)^{\\\\rm t}$ is received. It is a simple matter of matrix multiplication to determine whether or not ${\\\\mathbf x}$ is a codeword. Since

\\n\\\\begin{equation*}\\nH{\\\\mathbf x} =\\n\\\\begin{pmatrix} \\n0 \\\\\\\\ 1 \\\\\\\\ 1\\n\\\\end{pmatrix},\\n\\\\end{equation*}\\n

the received word is not a codeword. We must either attempt to correct the word or request that it be transmitted again.

\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown"],"string":"[\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"],["markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown"]],"string":"[\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459038,"cells":{"hexsha":{"kind":"string","value":"e7ee7b1ee06aa022711bafe80a3249a1ffd6237c"},"size":{"kind":"number","value":271316,"string":"271,316"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"code/8_CNN_cifar10_mnist.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Akshatha-Jagadish/DL_topics"},"max_stars_repo_head_hexsha":{"kind":"string","value":"98aa979dde2021a20e7b561b83230ac0a475cf5e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"code/8_CNN_cifar10_mnist.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Akshatha-Jagadish/DL_topics"},"max_issues_repo_head_hexsha":{"kind":"string","value":"98aa979dde2021a20e7b561b83230ac0a475cf5e"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"code/8_CNN_cifar10_mnist.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Akshatha-Jagadish/DL_topics"},"max_forks_repo_head_hexsha":{"kind":"string","value":"98aa979dde2021a20e7b561b83230ac0a475cf5e"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":143.7055084746,"string":"143.705508"},"max_line_length":{"kind":"number","value":165112,"string":"165,112"},"alphanum_fraction":{"kind":"number","value":0.8741836088,"string":"0.874184"},"cells":{"kind":"list like","value":[[["import tensorflow as tf\nfrom tensorflow.keras import datasets,layers,models\nimport matplotlib.pyplot as plt\nimport numpy as np","_____no_output_____"],["(X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()\nX_train.shape","_____no_output_____"],["X_test.shape","_____no_output_____"],["plt.imshow(X_train[0])","_____no_output_____"],["classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']","_____no_output_____"],["y_train = y_train.reshape(-1,)\nclasses[y_train[0]]","_____no_output_____"],["def plot_sample(X,y,index):\n plt.figure(figsize=(15,2))\n plt.imshow(X[index])\n 
plt.xlabel(classes[y[index]])","_____no_output_____"],["plot_sample(X_train,y_train,0)","_____no_output_____"],["plot_sample(X_train,y_train,4)","_____no_output_____"],["X_train = X_train/255\nX_test = X_test/255","_____no_output_____"],["#model building\nmodel = models.Sequential([\n layers.Flatten(input_shape=(32,32,3)),\n layers.Dense(3000,activation='relu'),\n layers.Dense(1000,activation='relu'),\n layers.Dense(10,activation='sigmoid'),\n])\n\nmodel.compile(\n optimizer='SGD',\n loss='sparse_categorical_crossentropy', \n metrics=['accuracy']\n)\n\nmodel.fit(X_train,y_train,epochs=5)","Epoch 1/5\n1563/1563 [==============================] - 97s 62ms/step - loss: 1.8602 - accuracy: 0.3331\nEpoch 2/5\n1563/1563 [==============================] - 96s 61ms/step - loss: 1.6575 - accuracy: 0.41310s -\nEpoch 3/5\n1563/1563 [==============================] - 96s 62ms/step - loss: 1.5689 - accuracy: 0.4462\nEpoch 4/5\n1563/1563 [==============================] - 86s 55ms/step - loss: 1.5080 - accuracy: 0.4688\nEpoch 5/5\n1563/1563 [==============================] - 86s 55ms/step - loss: 1.4557 - accuracy: 0.4862\n"],["tf.__version__","_____no_output_____"],["from sklearn.metrics import confusion_matrix, classification_report\nimport numpy as np\ny_pred = model.predict(X_test)\ny_pred_classes = [np.argmax(element) for element in y_pred]\n\nprint('Classification_report: \\n',classification_report(y_test,y_pred_classes))","Classification_report: \n precision recall f1-score support\n\n 0 0.64 0.31 0.41 1000\n 1 0.70 0.49 0.58 1000\n 2 0.23 0.70 0.34 1000\n 3 0.32 0.41 0.36 1000\n 4 0.47 0.19 0.27 1000\n 5 0.44 0.25 0.32 1000\n 6 0.47 0.54 0.51 1000\n 7 0.69 0.32 0.44 1000\n 8 0.52 0.67 0.58 1000\n 9 0.67 0.39 0.49 1000\n\n accuracy 0.43 10000\n macro avg 0.51 0.43 0.43 10000\nweighted avg 0.51 0.43 0.43 10000\n\n"],["cnn = models.Sequential([\n #cnn\n layers.Conv2D(filters=32,kernel_size=(3,3),activation='relu',input_shape=(32,32,3)),\n layers.MaxPool2D((2,2)),\n layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu'),\n layers.MaxPool2D((2,2)),\n \n #dense\n layers.Flatten(),\n layers.Dense(64,activation='relu'),\n layers.Dense(10,activation='softmax')\n])\n\ncnn.compile(\n optimizer='SGD',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)\n\ncnn.fit(X_train,y_train,epochs=10)","Epoch 1/10\n1563/1563 [==============================] - 46s 30ms/step - loss: 2.0075 - accuracy: 0.27360s - loss: 2.009\nEpoch 2/10\n1563/1563 [==============================] - 41s 26ms/step - loss: 1.6264 - accuracy: 0.4201\nEpoch 3/10\n1563/1563 [==============================] - 48s 31ms/step - loss: 1.4375 - accuracy: 0.4883\nEpoch 4/10\n1563/1563 [==============================] - 47s 30ms/step - loss: 1.3361 - accuracy: 0.5303\nEpoch 5/10\n1563/1563 [==============================] - 40s 26ms/step - loss: 1.2569 - accuracy: 0.5582\nEpoch 6/10\n1563/1563 [==============================] - 43s 27ms/step - loss: 1.1882 - accuracy: 0.5832\nEpoch 7/10\n1563/1563 [==============================] - 40s 26ms/step - loss: 1.1306 - accuracy: 0.6034\nEpoch 8/10\n1563/1563 [==============================] - 42s 27ms/step - loss: 1.0750 - accuracy: 0.6270\nEpoch 9/10\n1563/1563 [==============================] - 41s 26ms/step - loss: 1.0297 - accuracy: 0.6418\nEpoch 10/10\n1563/1563 [==============================] - 41s 26ms/step - loss: 0.9862 - accuracy: 0.6572\n"],["cnn.evaluate(X_test,y_test)","313/313 [==============================] - 2s 7ms/step - loss: 1.0495 - accuracy: 0.6361\n"],["y_pred = 
cnn.predict(X_test)\ny_test = y_test.reshape(-1,)\ny_test","_____no_output_____"],["y_pred_classes = [np.argmax(element) for element in y_pred]\ny_pred_classes","_____no_output_____"],["plot_sample(X_test,y_test,2)","_____no_output_____"],["classes[y_pred_classes[2]]","_____no_output_____"],["print(\"Classification report: \\n\", classification_report(y_test,y_pred_classes))","Classification report: \n precision recall f1-score support\n\n 0 0.70 0.70 0.70 1000\n 1 0.78 0.66 0.72 1000\n 2 0.58 0.45 0.50 1000\n 3 0.59 0.28 0.38 1000\n 4 0.55 0.62 0.58 1000\n 5 0.55 0.59 0.57 1000\n 6 0.62 0.83 0.71 1000\n 7 0.63 0.77 0.70 1000\n 8 0.82 0.68 0.75 1000\n 9 0.59 0.79 0.68 1000\n\n accuracy 0.64 10000\n macro avg 0.64 0.64 0.63 10000\nweighted avg 0.64 0.64 0.63 10000\n\n"]],[["![image.png](attachment:image.png)","_____no_output_____"]],[["(X_train,y_train),(X_test,y_test) = datasets.mnist.load_data()\nX_train.shape","_____no_output_____"],["X_train = X_train.reshape(60000,28,28,1)\nX_train.shape","_____no_output_____"],["X_train.shape","_____no_output_____"],["plt.imshow(X_train[0])","_____no_output_____"],["X_train = X_train/255\nX_test = X_test/255","_____no_output_____"],["mnist_cnn = models.Sequential([\n layers.Conv2D(filters=10, kernel_size=(5,5), activation='relu',input_shape=(28,28,1)),\n layers.MaxPooling2D(2,2),\n# layers.Conv2D(filters=5, kernel_size=(3,3), activation='relu',input_shape=(28,28,1)),\n# layers.MaxPooling2D(2,2),\n layers.Flatten(),\n layers.Dense(50,activation='relu'),\n layers.Dense(10,activation='softmax')\n])\n\nmnist_cnn.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy', #categories 1,2,3... sparse because output is integer\n metrics=['accuracy']\n)\n\nmnist_cnn.fit(X_train,y_train,epochs=10)","Epoch 1/10\n1875/1875 [==============================] - 12s 7ms/step - loss: 0.1976 - accuracy: 0.9426\nEpoch 2/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0706 - accuracy: 0.9786 0s - loss: 0.0707 - \nEpoch 3/10\n1875/1875 [==============================] - 12s 7ms/step - loss: 0.0491 - accuracy: 0.9850\nEpoch 4/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0388 - accuracy: 0.9878\nEpoch 5/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0303 - accuracy: 0.9903\nEpoch 6/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0245 - accuracy: 0.9920\nEpoch 7/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0192 - accuracy: 0.9935\nEpoch 8/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0157 - accuracy: 0.9948\nEpoch 9/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0126 - accuracy: 0.9960\nEpoch 10/10\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0113 - accuracy: 0.9961\n"],["X_test.shape","_____no_output_____"],["X_test = X_test.reshape(10000,28,28,1)","313/313 [==============================] - 1s 3ms/step - loss: 0.0560 - accuracy: 0.9843\n"],["mnist_cnn.evaluate(X_test,y_test)","313/313 [==============================] - 1s 3ms/step - loss: 0.0557 - accuracy: 0.9843\n"],["y_pred = mnist_cnn.predict(X_test)\ny_pred_classes = [np.argmax(element) for element in y_pred]\ncm = confusion_matrix(y_test,y_pred_classes)\ncm","_____no_output_____"],["import seaborn as sn\nplt.figure(figsize=(10,7))\nsn.heatmap(cm,annot=True,fmt='d')\nplt.xlabel('Predicted')\nplt.ylabel('Truth')","_____no_output_____"]]],"string":"[\n [\n [\n \"import tensorflow as tf\\nfrom tensorflow.keras 
import datasets,layers,models\\nimport matplotlib.pyplot as plt\\nimport numpy as np\",\n \"_____no_output_____\"\n ],\n [\n \"(X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()\\nX_train.shape\",\n \"_____no_output_____\"\n ],\n [\n \"X_test.shape\",\n \"_____no_output_____\"\n ],\n [\n \"plt.imshow(X_train[0])\",\n \"_____no_output_____\"\n ],\n [\n \"classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']\",\n \"_____no_output_____\"\n ],\n [\n \"y_train = y_train.reshape(-1,)\\nclasses[y_train[0]]\",\n \"_____no_output_____\"\n ],\n [\n \"def plot_sample(X,y,index):\\n plt.figure(figsize=(15,2))\\n plt.imshow(X[index])\\n plt.xlabel(classes[y[index]])\",\n \"_____no_output_____\"\n ],\n [\n \"plot_sample(X_train,y_train,0)\",\n \"_____no_output_____\"\n ],\n [\n \"plot_sample(X_train,y_train,4)\",\n \"_____no_output_____\"\n ],\n [\n \"X_train = X_train/255\\nX_test = X_test/255\",\n \"_____no_output_____\"\n ],\n [\n \"#model building\\nmodel = models.Sequential([\\n layers.Flatten(input_shape=(32,32,3)),\\n layers.Dense(3000,activation='relu'),\\n layers.Dense(1000,activation='relu'),\\n layers.Dense(10,activation='sigmoid'),\\n])\\n\\nmodel.compile(\\n optimizer='SGD',\\n loss='sparse_categorical_crossentropy', \\n metrics=['accuracy']\\n)\\n\\nmodel.fit(X_train,y_train,epochs=5)\",\n \"Epoch 1/5\\n1563/1563 [==============================] - 97s 62ms/step - loss: 1.8602 - accuracy: 0.3331\\nEpoch 2/5\\n1563/1563 [==============================] - 96s 61ms/step - loss: 1.6575 - accuracy: 0.41310s -\\nEpoch 3/5\\n1563/1563 [==============================] - 96s 62ms/step - loss: 1.5689 - accuracy: 0.4462\\nEpoch 4/5\\n1563/1563 [==============================] - 86s 55ms/step - loss: 1.5080 - accuracy: 0.4688\\nEpoch 5/5\\n1563/1563 [==============================] - 86s 55ms/step - loss: 1.4557 - accuracy: 0.4862\\n\"\n ],\n [\n \"tf.__version__\",\n \"_____no_output_____\"\n ],\n [\n \"from sklearn.metrics import confusion_matrix, classification_report\\nimport numpy as np\\ny_pred = model.predict(X_test)\\ny_pred_classes = [np.argmax(element) for element in y_pred]\\n\\nprint('Classification_report: \\\\n',classification_report(y_test,y_pred_classes))\",\n \"Classification_report: \\n precision recall f1-score support\\n\\n 0 0.64 0.31 0.41 1000\\n 1 0.70 0.49 0.58 1000\\n 2 0.23 0.70 0.34 1000\\n 3 0.32 0.41 0.36 1000\\n 4 0.47 0.19 0.27 1000\\n 5 0.44 0.25 0.32 1000\\n 6 0.47 0.54 0.51 1000\\n 7 0.69 0.32 0.44 1000\\n 8 0.52 0.67 0.58 1000\\n 9 0.67 0.39 0.49 1000\\n\\n accuracy 0.43 10000\\n macro avg 0.51 0.43 0.43 10000\\nweighted avg 0.51 0.43 0.43 10000\\n\\n\"\n ],\n [\n \"cnn = models.Sequential([\\n #cnn\\n layers.Conv2D(filters=32,kernel_size=(3,3),activation='relu',input_shape=(32,32,3)),\\n layers.MaxPool2D((2,2)),\\n layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu'),\\n layers.MaxPool2D((2,2)),\\n \\n #dense\\n layers.Flatten(),\\n layers.Dense(64,activation='relu'),\\n layers.Dense(10,activation='softmax')\\n])\\n\\ncnn.compile(\\n optimizer='SGD',\\n loss='sparse_categorical_crossentropy',\\n metrics=['accuracy']\\n)\\n\\ncnn.fit(X_train,y_train,epochs=10)\",\n \"Epoch 1/10\\n1563/1563 [==============================] - 46s 30ms/step - loss: 2.0075 - accuracy: 0.27360s - loss: 2.009\\nEpoch 2/10\\n1563/1563 [==============================] - 41s 26ms/step - loss: 1.6264 - accuracy: 0.4201\\nEpoch 3/10\\n1563/1563 [==============================] - 48s 31ms/step - loss: 1.4375 - accuracy: 
0.4883\\nEpoch 4/10\\n1563/1563 [==============================] - 47s 30ms/step - loss: 1.3361 - accuracy: 0.5303\\nEpoch 5/10\\n1563/1563 [==============================] - 40s 26ms/step - loss: 1.2569 - accuracy: 0.5582\\nEpoch 6/10\\n1563/1563 [==============================] - 43s 27ms/step - loss: 1.1882 - accuracy: 0.5832\\nEpoch 7/10\\n1563/1563 [==============================] - 40s 26ms/step - loss: 1.1306 - accuracy: 0.6034\\nEpoch 8/10\\n1563/1563 [==============================] - 42s 27ms/step - loss: 1.0750 - accuracy: 0.6270\\nEpoch 9/10\\n1563/1563 [==============================] - 41s 26ms/step - loss: 1.0297 - accuracy: 0.6418\\nEpoch 10/10\\n1563/1563 [==============================] - 41s 26ms/step - loss: 0.9862 - accuracy: 0.6572\\n\"\n ],\n [\n \"cnn.evaluate(X_test,y_test)\",\n \"313/313 [==============================] - 2s 7ms/step - loss: 1.0495 - accuracy: 0.6361\\n\"\n ],\n [\n \"y_pred = cnn.predict(X_test)\\ny_test = y_test.reshape(-1,)\\ny_test\",\n \"_____no_output_____\"\n ],\n [\n \"y_pred_classes = [np.argmax(element) for element in y_pred]\\ny_pred_classes\",\n \"_____no_output_____\"\n ],\n [\n \"plot_sample(X_test,y_test,2)\",\n \"_____no_output_____\"\n ],\n [\n \"classes[y_pred_classes[2]]\",\n \"_____no_output_____\"\n ],\n [\n \"print(\\\"Classification report: \\\\n\\\", classification_report(y_test,y_pred_classes))\",\n \"Classification report: \\n precision recall f1-score support\\n\\n 0 0.70 0.70 0.70 1000\\n 1 0.78 0.66 0.72 1000\\n 2 0.58 0.45 0.50 1000\\n 3 0.59 0.28 0.38 1000\\n 4 0.55 0.62 0.58 1000\\n 5 0.55 0.59 0.57 1000\\n 6 0.62 0.83 0.71 1000\\n 7 0.63 0.77 0.70 1000\\n 8 0.82 0.68 0.75 1000\\n 9 0.59 0.79 0.68 1000\\n\\n accuracy 0.64 10000\\n macro avg 0.64 0.64 0.63 10000\\nweighted avg 0.64 0.64 0.63 10000\\n\\n\"\n ]\n ],\n [\n [\n \"![image.png](attachment:image.png)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"(X_train,y_train),(X_test,y_test) = datasets.mnist.load_data()\\nX_train.shape\",\n \"_____no_output_____\"\n ],\n [\n \"X_train = X_train.reshape(60000,28,28,1)\\nX_train.shape\",\n \"_____no_output_____\"\n ],\n [\n \"X_train.shape\",\n \"_____no_output_____\"\n ],\n [\n \"plt.imshow(X_train[0])\",\n \"_____no_output_____\"\n ],\n [\n \"X_train = X_train/255\\nX_test = X_test/255\",\n \"_____no_output_____\"\n ],\n [\n \"mnist_cnn = models.Sequential([\\n layers.Conv2D(filters=10, kernel_size=(5,5), activation='relu',input_shape=(28,28,1)),\\n layers.MaxPooling2D(2,2),\\n# layers.Conv2D(filters=5, kernel_size=(3,3), activation='relu',input_shape=(28,28,1)),\\n# layers.MaxPooling2D(2,2),\\n layers.Flatten(),\\n layers.Dense(50,activation='relu'),\\n layers.Dense(10,activation='softmax')\\n])\\n\\nmnist_cnn.compile(\\n optimizer='adam',\\n loss='sparse_categorical_crossentropy', #categories 1,2,3... 
sparse because output is integer\\n metrics=['accuracy']\\n)\\n\\nmnist_cnn.fit(X_train,y_train,epochs=10)\",\n \"Epoch 1/10\\n1875/1875 [==============================] - 12s 7ms/step - loss: 0.1976 - accuracy: 0.9426\\nEpoch 2/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0706 - accuracy: 0.9786 0s - loss: 0.0707 - \\nEpoch 3/10\\n1875/1875 [==============================] - 12s 7ms/step - loss: 0.0491 - accuracy: 0.9850\\nEpoch 4/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0388 - accuracy: 0.9878\\nEpoch 5/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0303 - accuracy: 0.9903\\nEpoch 6/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0245 - accuracy: 0.9920\\nEpoch 7/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0192 - accuracy: 0.9935\\nEpoch 8/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0157 - accuracy: 0.9948\\nEpoch 9/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0126 - accuracy: 0.9960\\nEpoch 10/10\\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0113 - accuracy: 0.9961\\n\"\n ],\n [\n \"X_test.shape\",\n \"_____no_output_____\"\n ],\n [\n \"X_test = X_test.reshape(10000,28,28,1)\",\n \"313/313 [==============================] - 1s 3ms/step - loss: 0.0560 - accuracy: 0.9843\\n\"\n ],\n [\n \"mnist_cnn.evaluate(X_test,y_test)\",\n \"313/313 [==============================] - 1s 3ms/step - loss: 0.0557 - accuracy: 0.9843\\n\"\n ],\n [\n \"y_pred = mnist_cnn.predict(X_test)\\ny_pred_classes = [np.argmax(element) for element in y_pred]\\ncm = confusion_matrix(y_test,y_pred_classes)\\ncm\",\n \"_____no_output_____\"\n ],\n [\n \"import seaborn as sn\\nplt.figure(figsize=(10,7))\\nsn.heatmap(cm,annot=True,fmt='d')\\nplt.xlabel('Predicted')\\nplt.ylabel('Truth')\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code"],"string":"[\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459039,"cells":{"hexsha":{"kind":"string","value":"e7ee7b4b8461c11a296300edc425d8a016820012"},"size":{"kind":"number","value":5059,"string":"5,059"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"ipynb/novel-taxa/Index.ipynb"},"max_stars_repo_name":{"kind":"string","value":"sjanssen2/tax-credit-data"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ed1a46f27b343e9519e6dc0cb1dece06e8017996"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"ipynb/novel-taxa/Index.ipynb"},"max_issues_repo_name":{"kind":"string","value":"sjanssen2/tax-credit-data"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ed1a46f27b343e9519e6dc0cb1dece06e8017996"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ipynb/novel-taxa/Index.ipynb"},"max_forks_repo_name":{"kind":"string","value":"sjanssen2/tax-credit-data"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ed1a46f27b343e9519e6dc0cb1dece06e8017996"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":63.2375,"string":"63.2375"},"max_line_length":{"kind":"number","value":819,"string":"819"},"alphanum_fraction":{"kind":"number","value":0.6694999012,"string":"0.6695"},"cells":{"kind":"list like","value":[[["# Novel-taxa classification evaluation\n\nThe following notebooks describe the evaluation of taxonomy classifiers using \"novel taxa\" data sets. Novel-taxa analysis is a form of cross-validated taxonomic classification, wherein random unique sequences are sampled from the reference database as a test set; all sequences sharing taxonomic affiliation at a given taxonomic level are removed from the reference database (training set); and taxonomy is assigned to the query sequences at the given taxonomic level. Thus, this test interrogates the behavior of a taxonomy classifier when challenged with \"novel\" sequences that are not represented by close matches within the reference sequence database. Such an analysis is performed to assess the degree to which \"overassignment\" occurs for sequences that are not represented in a reference database.\n\nAt each level ``L``, the unique taxonomic clades are randomly sampled and used as ``QUERY`` sequences. All sequences that match that taxonomic annotation at ``L`` are excluded from ``REF``. Hence, species-level ``QUERY`` assignment asks how accurate assignment is to an \"unknown\" species that is not represented in the ``REF``, though other species in the same genus are. Genus-level ``QUERY`` assignment asks how accurate assignment is to an \"unknown\" genus that is not represented in the ``REF``, though other genera in the same family are, *et cetera*.\n\nThe steps involved in preparing and executing novel-taxa analysis are described in a series of notebooks:\n\n1) **[Novel taxa dataset generation](./dataset-generation.ipynb)** only needs to be performed once for a given reference database. Only run this notebook if you wish to make novel taxa datasets from a different reference database, or alter the parameters used to make the novel taxa datasets. 
The default included in Tax-Credit is Greengenes 13\\_8 release, amplified *in silico* with primers 515f and 806r, and trimmed to 250 nt from the 5' end.\n\n2) **[Taxonomic classification](./taxonomy-assignment.ipynb)** of novel taxa sequences is performed using the datasets generated in *step 1*. This template currently describes classification using QIIME 1 classifiers and can be used as a template for classifiers that are called via command line interface. Python-based classifiers can be used following the example of q2-feature-classifier.\n\n3) **[Classifier evaluation](./evaluate-classification.ipynb)** is performed based on taxonomic classifications generated by each classifier used in *step 2*. \n\n\n## Definitions\nThe **[dataset generation](./dataset-generation.ipynb)** notebook uses a few novel definitions. The following provides some explanation of the definitions used in that notebook.\n\n* ``source`` = original reference database sequences and taxonomy.\n* ``QUERY`` = 'novel' query sequences and taxonomies randomly drawn from ``source``. \n* ``REF`` = ``source`` - ``novel`` taxa, used for taxonomy assignment.\n* ``L`` = taxonomic level being tested\n * 0 = kingdom, 1 = phylum, 2 = class, 3 = order, 4 = family, 5 = genus, 6 = species\n* ``branching`` = describes a taxon at level ``L`` that \"branches\" into two or more lineages at ``L + 1``. \n * A \"branched\" taxon, then, describes these lineages. E.g., in the example below Lactobacillaceae, Lactobacillus, and Pediococcus branch, while Paralactobacillus is unbranching. The Lactobacillus and Pediococcus species are \"branched\". Paralactobacillus selangorensis is \"unbranched\"\n * The novel taxa analysis only uses \"branching\" taxa, such that for each ``QUERY`` at level ``L``, ``REF`` must contain one or more taxa that share the same clade at level ``L - 1``.\n\n```\nLactobacillaceae\n └── Lactobacillus\n │ ├── Lactobacillus brevis\n │ └── Lactobacillus sanfranciscensis\n ├── Pediococcus\n │ ├── Pediococcus damnosus\n │ └── Pediococcus claussenii\n └── Paralactobacillus\n └── Paralactobacillus selangorensis\n```\n","_____no_output_____"]]],"string":"[\n [\n [\n \"# Novel-taxa classification evaluation\\n\\nThe following notebooks describe the evaluation of taxonomy classifiers using \\\"novel taxa\\\" data sets. Novel-taxa analysis is a form of cross-validated taxonomic classification, wherein random unique sequences are sampled from the reference database as a test set; all sequences sharing taxonomic affiliation at a given taxonomic level are removed from the reference database (training set); and taxonomy is assigned to the query sequences at the given taxonomic level. Thus, this test interrogates the behavior of a taxonomy classifier when challenged with \\\"novel\\\" sequences that are not represented by close matches within the reference sequence database. Such an analysis is performed to assess the degree to which \\\"overassignment\\\" occurs for sequences that are not represented in a reference database.\\n\\nAt each level ``L``, the unique taxonomic clades are randomly sampled and used as ``QUERY`` sequences. All sequences that match that taxonomic annotation at ``L`` are excluded from ``REF``. Hence, species-level ``QUERY`` assignment asks how accurate assignment is to an \\\"unknown\\\" species that is not represented in the ``REF``, though other species in the same genus are. 
Genus-level ``QUERY`` assignment asks how accurate assignment is to an \\\"unknown\\\" genus that is not represented in the ``REF``, though other genera in the same family are, *et cetera*.\\n\\nThe steps involved in preparing and executing novel-taxa analysis are described in a series of notebooks:\\n\\n1) **[Novel taxa dataset generation](./dataset-generation.ipynb)** only needs to be performed once for a given reference database. Only run this notebook if you wish to make novel taxa datasets from a different reference database, or alter the parameters used to make the novel taxa datasets. The default included in Tax-Credit is Greengenes 13\\\\_8 release, amplified *in silico* with primers 515f and 806r, and trimmed to 250 nt from the 5' end.\\n\\n2) **[Taxonomic classification](./taxonomy-assignment.ipynb)** of novel taxa sequences is performed using the datasets generated in *step 1*. This template currently describes classification using QIIME 1 classifiers and can be used as a template for classifiers that are called via command line interface. Python-based classifiers can be used following the example of q2-feature-classifier.\\n\\n3) **[Classifier evaluation](./evaluate-classification.ipynb)** is performed based on taxonomic classifications generated by each classifier used in *step 2*. \\n\\n\\n## Definitions\\nThe **[dataset generation](./dataset-generation.ipynb)** notebook uses a few novel definitions. The following provides some explanation of the definitions used in that notebook.\\n\\n* ``source`` = original reference database sequences and taxonomy.\\n* ``QUERY`` = 'novel' query sequences and taxonomies randomly drawn from ``source``. \\n* ``REF`` = ``source`` - ``novel`` taxa, used for taxonomy assignment.\\n* ``L`` = taxonomic level being tested\\n * 0 = kingdom, 1 = phylum, 2 = class, 3 = order, 4 = family, 5 = genus, 6 = species\\n* ``branching`` = describes a taxon at level ``L`` that \\\"branches\\\" into two or more lineages at ``L + 1``. \\n * A \\\"branched\\\" taxon, then, describes these lineages. E.g., in the example below Lactobacillaceae, Lactobacillus, and Pediococcus branch, while Paralactobacillus is unbranching. The Lactobacillus and Pediococcus species are \\\"branched\\\". 
Paralactobacillus selangorensis is \\\"unbranched\\\"\\n * The novel taxa analysis only uses \\\"branching\\\" taxa, such that for each ``QUERY`` at level ``L``, ``REF`` must contain one or more taxa that share the same clade at level ``L - 1``.\\n\\n```\\nLactobacillaceae\\n └── Lactobacillus\\n │ ├── Lactobacillus brevis\\n │ └── Lactobacillus sanfranciscensis\\n ├── Pediococcus\\n │ ├── Pediococcus damnosus\\n │ └── Pediococcus claussenii\\n └── Paralactobacillus\\n └── Paralactobacillus selangorensis\\n```\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown"],"string":"[\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"]],"string":"[\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459040,"cells":{"hexsha":{"kind":"string","value":"e7ee819ae0585c20ac91f8f2d276fd0b1f71a598"},"size":{"kind":"number","value":24441,"string":"24,441"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"runAnalyses.ipynb"},"max_stars_repo_name":{"kind":"string","value":"draran/decision-weight-recovery"},"max_stars_repo_head_hexsha":{"kind":"string","value":"41c5b888439af621afbb898dbe8bfef66b1e72df"},"max_stars_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"runAnalyses.ipynb"},"max_issues_repo_name":{"kind":"string","value":"draran/decision-weight-recovery"},"max_issues_repo_head_hexsha":{"kind":"string","value":"41c5b888439af621afbb898dbe8bfef66b1e72df"},"max_issues_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"runAnalyses.ipynb"},"max_forks_repo_name":{"kind":"string","value":"draran/decision-weight-recovery"},"max_forks_repo_head_hexsha":{"kind":"string","value":"41c5b888439af621afbb898dbe8bfef66b1e72df"},"max_forks_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":35.2175792507,"string":"35.217579"},"max_line_length":{"kind":"number","value":119,"string":"119"},"alphanum_fraction":{"kind":"number","value":0.4224049752,"string":"0.422405"},"cells":{"kind":"list like","value":[[["# Importing necessary libraries\n#===============================================================================\nimport matplotlib as mpl\nmpl.use('qt5agg')\nmpl.interactive(True)\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sbn\nsbn.set()\nfrom scipy import stats\nimport h5py\nfrom os.path import dirname\nfrom pathlib import Path\nimport sys\nimport mmodel_reversals as mm","_____no_output_____"],["# Setting paths\n#===============================================================================\nROOTPATH = Path().cwd()\n(ROOTPATH / 'Export').mkdir(parents=True, exist_ok=True)","_____no_output_____"],["# Function to compute complex-valued OLS\n#===============================================================================\ndef 
complexGLM(pred, crit):\n '''\n Compute regression weights for predicting the criterion variable using predictor arrays\n In -> pred = predictor array, crit = criterion vector\n Out -> coefs = regression coefficients/weights\n '''\n pred = np.array(pred)\n crit = np.array(crit)\n if len(crit.shape) < 2:\n crit = crit.reshape(-1, 1)\n if pred.dtype is not np.dtype('complex'):\n pred = np.exp(pred * 1j)\n if crit.dtype is not np.dtype('complex'):\n crit = np.exp(crit * 1j)\n a, b = [crit.shape[0], pred.shape[0]]\n if crit.shape[0] != pred.shape[0]:\n raise ValueError('The two arrays are of incompatible shape, {} and {}'.format(a, b))\n coefs = np.asmatrix(np.asmatrix(pred).H * np.asmatrix(pred)).I * (np.asmatrix(pred).H * np.asmatrix(crit))\n return coefs","_____no_output_____"],["# Setting simulation parameters\n#===============================================================================\nnp.random.seed(0)\ntrlN = 1000\nrunN = 10000\nsimK = np.sort([.1, 2.5, 1., 5., 10.])","_____no_output_____"],["# Simulate independently sampled motion directions\n#===============================================================================\npresDirs_ind = np.angle(\n np.exp(\n np.random.uniform(\n 0, 2 * np.pi, \n size = [runN, trlN, 6]\n ) * 1j\n )\n)\n\npercDirs_ind = np.concatenate([\n np.angle(\n np.exp(\n np.array(\n [\n np.random.vonmises(\n presDirs_ind, K\n )\n for K in simK\n ]\n ) * 1j\n )\n ),\n # no noise condition, K = inf\n presDirs_ind[None]\n])\n# saving data for independently sampled directions\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\n f.create_dataset(\n name = 'presDirs_ind', \n data = presDirs_ind, \n compression = 9\n )\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\n f.create_dataset(\n name = 'percDirs_ind', \n data = percDirs_ind, \n compression = 9\n )\npresDirs_ind = None\npercDirs_ind = None","_____no_output_____"],["# Simulate dependently sampled motion direction\n#===============================================================================\nfrstTar, frstFoil = np.random.choice(\n np.arange(0, 360), \n size = [2, runN, trlN]\n)\nfrstDis, scndTar = (\n frstTar[None] \n # random direction (CW/CCW)\n + np.random.choice(\n [-1, 1],\n size = [2, runN, trlN]\n ) \n # random angular offset\n * np.random.choice(\n np.arange(30, 151),\n size = [2, runN, trlN]\n )\n)\nscndDis, scndFoil = (\n np.stack(\n [scndTar, frstFoil]\n )\n # random direction (CW/CCW)\n + np.random.choice(\n [-1, 1],\n size = [2, runN, trlN]\n ) \n # random angular offset\n * np.random.choice(\n np.arange(30, 151),\n size = [2, runN, trlN]\n )\n)\npresDirs_dep = np.angle(\n np.exp(\n np.deg2rad(np.stack(\n [frstTar, scndTar, frstDis, scndDis, frstFoil, scndFoil],\n axis = -1\n )) * 1j\n )\n)\n\npercDirs_dep = np.concatenate([\n np.angle(\n np.exp(\n np.array(\n [\n np.random.vonmises(\n presDirs_dep, K\n )\n for K in simK\n ]\n ) * 1j\n )\n ),\n # no noise condition, K = inf\n presDirs_dep[None]\n])\n\n# saving data for dependently sampled directions\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\n f.create_dataset(\n name = 'presDirs_dep', \n data = presDirs_dep, \n compression = 9\n )\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\n f.create_dataset(\n name = 'percDirs_dep', \n data = percDirs_dep, \n compression = 9\n )\npresDirs_dep = None\npercDirs_dep = None","_____no_output_____"],["# Simulate complex-valued regression weights\n#===============================================================================\nsimCoefAbs = 
np.random.uniform(size = [runN, 6])\n# the angles of weigthing coeficients\nsimCoefAng = np.random.uniform(\n 0, 2 * np.pi,\n size = [runN, 6]\n)\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\n f.create_dataset(\n name = 'coefsAbs', \n data = simCoefAbs, \n compression = 9\n )\n f.create_dataset(\n name = 'coefsAng', \n data = simCoefAng, \n compression = 9\n )\nsimCoefAbs = None\nsimCoefAng = None","_____no_output_____"],["# Run complex-valued OLS for different simulation conditions\n#===============================================================================\nfor cond in ['ind', 'dep', 'dep_ss']:\n # there are three conditions:\n # ind: independently sampled motion\n # dep: dependently sampled motion\n # dep_ss: dependently sampled motion, 100 trials per run\n print('Analysing {} simulation condition'.format(cond.upper()))\n ssize = None\n cond_raw = cond\n if 'ss' in cond.split('_'):\n cond, ssize = cond.split('_')\n with h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'r') as f:\n presDirs = f['presDirs_{}'.format(cond)][:]\n percDirs = f['percDirs_{}'.format(cond)][:]\n coefsAbs = f['coefsAbs'][:]\n coefsAng = f['coefsAng'][:]\n if ssize:\n presDirs = presDirs[:, :100]\n percDirs = percDirs[:, :, :100]\n\n # running complex-values OLS for different simulated weight angles\n for idx_simAngle, simAngle in enumerate(['null', 'real']):\n # two analyses are run\n # null: the angles of the simulated complex-valued regression weights are zero\n # real: the angles are are randomly sampled \n simCoefs = (\n np.exp(\n [0, 1][idx_simAngle] * coefsAng * 1j\n ) * coefsAbs\n ) \n # %% simulating response on the basis of perceived directions and simulated\n respDirs = np.array([\n np.angle(\n np.sum(\n simCoefs[:, None] \n * np.exp(simKappa * 1j), \n -1))\n for simKappa in percDirs\n ])\n # weighting coefficients\n coefs = np.array(\n [\n [\n complexGLM(presDirs[idxRun], run)\n for idxRun, run in enumerate(simKappa)\n ]\n for simKappa in respDirs\n ] \n ).squeeze()\n print('Finished complex OLS')\n # %% goodness of fit\n predDirs = np.array([\n np.angle(\n np.sum(\n simKappa[:, None, :] \n * np.exp(presDirs * 1j), -1\n )\n )\n for simKappa in coefs\n ])\n GoF = np.array([\n np.angle(\n np.exp(respDirs[simKappa] * 1j)\n / np.exp(predDirs[simKappa] * 1j)\n )\n for simKappa in range(coefs.shape[0])\n ])\n # saving data\n with h5py.File(ROOTPATH / 'Export' / 'simCoefs.hdf', 'a') as f:\n f.create_dataset(\n name = 'coefsAbsHat_{}_{}'.format(cond_raw,simAngle), \n data = np.abs(coefs), \n compression = 9\n )\n f.create_dataset(\n name = 'coefsAngHat_{}_{}'.format(cond_raw,simAngle), \n data = np.angle(coefs), \n compression = 9\n )\n f.create_dataset(\n name = 'GoF_{}_{}'.format(cond_raw,simAngle), \n data = GoF, \n compression = 9\n )","_____no_output_____"],["# Setting parameters for plotting supplementary figure 1\n#===============================================================================\n# two different plottings can be performed\n# first, the results for simulated complex-valued weights using real angles\n# second, the results for simulated weights using zero angles\n# here, only the real values are plotted.\n# N.B., the results for zero angles yields similart goodness-of-fit\n# N.B., the ability of the complex-valued OLS to recover the angles (not plotted)\n# is similar to its ability to recover the lengths, i.e., the decision weights .\nconds = [\n 'GoF_ind_real',\n 'GoF_dep_real',\n 'GoF_dep_ss_real'\n]\nwith h5py.File(ROOTPATH / 'Export' / 'simCoefs.hdf', 'r') as f:\n 
GoF = dict([(cond, f[cond][:]) for cond in conds])","_____no_output_____"],["# Plotting supplementary figure 1\n#===============================================================================\nsbn.set_style('ticks')\nSSIZE = 8\nMSIZE = 10\nLSIZE = 12\nparams = {'lines.linewidth' : 1.5,\n 'grid.linewidth' : 1,\n 'xtick.labelsize' : MSIZE,\n 'ytick.labelsize' : MSIZE,\n 'xtick.major.width' : 1,\n 'ytick.major.width' : 1,\n 'xtick.major.size' : 5,\n 'ytick.major.size' : 5,\n 'xtick.direction' : 'inout',\n 'ytick.direction' :'inout',\n 'axes.linewidth': 1,\n 'axes.labelsize' : MSIZE,\n 'axes.titlesize' : MSIZE,\n 'figure.titlesize' : LSIZE,\n 'font.size' : MSIZE,\n 'savefig.dpi': 300,\n 'font.sans-serif' : ['Calibri'],\n 'legend.fontsize' : MSIZE,\n 'hatch.linewidth' : .2}\nsbn.mpl.rcParams.update(params)\ncols = sbn.husl_palette(6, h = .15, s = .75, l = .5)\nsimK = np.sort([.1, 2.5, 1., 5., 10.])\nsimNoise = np.random.vonmises(0, simK[:, None], [5, 100000])\nfig = plt.figure(figsize = (8,2.8))\nax = fig.add_subplot(1, 4, 1)\nfor idx_noise, noise in enumerate(simNoise):\n sbn.kdeplot(\n noise,\n color = cols[idx_noise],\n alpha = .8,\n lw = 2,\n label = simK[idx_noise],\n ax = ax\n )\nax.axvline(0, color = cols[-1], alpha = .8, lw = 2, label = 'No noise')\nfor idx_cond, cond in enumerate(conds):\n ax = fig.add_subplot(1,4,2 + idx_cond)\n for idxK, err in enumerate(GoF[cond]):\n sbn.kdeplot(\n err.flatten(),\n color = cols[idxK],\n alpha = .8,\n lw = 2,\n label = '{}$\\degree$'.format(\n np.rad2deg(mm.cstd(err.flatten())).astype('int')\n ),\n ax = ax\n )\nfor idx_ax, ax in enumerate(fig.axes):\n title = '$\\kappa$'\n xlab = 'Perceptual noise'\n if idx_ax:\n title = '$\\sigma$'\n xlab = 'Prediction error'\n ax.legend(\n title = title, \n frameon = False,\n handlelength = 1,\n handletextpad = .5,\n markerfirst = False\n )\n ax.set_ylim(-0.05, 7)\n ax.set_xlim(-np.pi*1.1, np.pi*1.1)\n ax.set_xticks([-np.pi, 0, np.pi])\n ax.set_xticklabels(['-$\\pi$', '0', '$\\pi$'])\n ax.set_yticks([])\n ax.set_xlabel(xlab)\n ax.set_ylabel('Probability density')\n sbn.despine(ax = ax)\n ax.spines['bottom'].set_bounds(-np.pi, np.pi)\n ax.spines['left'].set_visible(False)\n if idx_ax:\n ax.yaxis.set_visible(False)\nplt.tight_layout(rect = (0, 0, 1, 1))\nfig.savefig(\n str(ROOTPATH / 'Export'/ 'GoodnessOfFit_All.png'), \n dpi = 600\n)\nplt.close(fig)","_____no_output_____"],["# Setting parameters for plotting supplementary figure 2\n#===============================================================================\nconds = [\n 'ind_real',\n 'dep_real',\n 'dep_ss_real'\n]\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'r') as f:\n coefsAbs = f['coefsAbs'][:]\ncols = sbn.husl_palette(6, h = .15, s = .75, l = .5)","_____no_output_____"],["# Plotting panels A-C of supplementary figure 2\n#===============================================================================\nfor idx_cond, cond in enumerate(conds):\n fig = plt.figure(figsize = (4,2.8))\n with h5py.File(ROOTPATH / 'Export' / 'simCoefs.hdf', 'r') as f:\n coefsAbsHat = f['_'.join(['coefsAbsHat', cond])][:]\n for idxK, weights in enumerate(coefsAbsHat):\n ax = fig.add_subplot(2, 3, idxK + 1)\n scatter = ax.plot(\n coefsAbs.flatten(), \n weights.flatten(), \n '.',\n mec = (.9,.9,.9),\n mfc = 'none',\n zorder = -10\n )\n line = ax.plot(\n np.array([0, 1]), np.array([0, 1]), \n 'k--',\n lw = 1,\n zorder = 0\n )\n bins = pd.qcut(coefsAbs.flatten(), 4).codes\n dataset = [weights.flatten()[bins == bin] for bin in np.unique(bins)]\n vlnplt = ax.violinplot(\n dataset, 
\n positions = [.125, .375, .625, .875],\n showextrema = False,\n showmedians = True,\n widths = .15,\n ) \n for i in vlnplt['bodies']:\n i.set_alpha(.8)\n i.set_facecolor(cols[idxK])\n i.set_lw(0)\n vlnplt['cmedians'].set_edgecolor('white')\n vlnplt['cmedians'].set_lw(.5)\n ax.text(\n .05, .95,\n (\n ['$\\kappa$ = {}'.format(k) for k in simK] \n + ['No noise']\n )[idxK],\n transform = ax.transAxes,\n va = 'top'\n )\n ax.set_xlabel('Simulated weights')\n ax.set_ylabel('Estimated weights')\n for idx_ax, ax in enumerate(fig.axes):\n ax.tick_params('both', direction = 'out')\n ax.set_xlim(-.1, 1.1)\n ax.set_ylim(-.1, 1.1)\n ax.spines['bottom'].set_bounds(0,1)\n ax.spines['left'].set_bounds(0,1)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set_xticks(np.linspace(0, 1, 3))\n ax.set_yticks(np.linspace(0, 1, 3))\n if idx_ax not in [0, 3]:\n ax.yaxis.set_visible(False)\n ax.spines['left'].set_visible(False)\n if idx_ax not in [3, 4, 5]:\n ax.xaxis.set_visible(False)\n ax.spines['bottom'].set_visible(False)\n plt.tight_layout(rect = (0, 0, 1, .975))\n label = [\n 'Independently sampled motion, 10$^3$ trials, 10$^4$ runs',\n 'Dependently sampled motion, 10$^3$ trials, 10$^4$ runs',\n 'Dependently sampled motion, 10$^2$ trials, 10$^4$ runs'\n ][idx_cond]\n fig.text(\n .5, 1, \n label,\n ha = 'center',\n va = 'top'\n )\n fig.savefig(\n str(\n ROOTPATH / \n 'Export' / \n 'WeightRecovery_{}.png'\n ).format([\n 'A', 'B', 'C'\n ][idx_cond]),\n dpi = 600\n )\n plt.close(fig)","_____no_output_____"],["# Plotting panel D of supplementary figure 2\n#===============================================================================\nfrom mpl_toolkits.axes_grid1 import ImageGrid\ncols = sbn.husl_palette(6, h = .15, s = .75, l = .5)\nfig = plt.figure(figsize = (4,2.8))\ngrid = ImageGrid(\n fig, 111, nrows_ncols = (2, 3), \n share_all = True, cbar_mode= 'single', aspect= True\n)\nfor idxK, weights in enumerate(coefsAbsHat):\n ax = grid[idxK]\n heatmap, xedges, yedges = np.histogram2d(\n np.array(list(map(\n stats.rankdata,\n coefsAbs\n ))).flatten(), \n np.array(list(map(\n stats.rankdata,\n weights\n ))).flatten(),\n bins = np.linspace(.5, 6.5, 7)\n )\n heatmap /= heatmap.sum()\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n im = ax.imshow(\n heatmap, \n extent = extent, origin = 'lower', \n vmin = 0, vmax = .15,\n cmap = 'viridis'\n )\n ax.text(\n .05, .95,\n (\n ['$\\kappa$ = {}'.format(k) for k in simK] \n + ['No noise']\n )[idxK],\n transform = ax.transAxes,\n va = 'top',\n color = 'white'\n )\ngrid.cbar_axes[0].colorbar(im)\ngrid.cbar_axes[0].set_ylim(0, .14)\ngrid.cbar_axes[0].set_yticks([.0, .05, .10, .15])\ngrid.cbar_axes[0].set_yticklabels(['0','5','10', '15'])\ngrid.cbar_axes[0].tick_params(direction = 'inout', length = 5)\ngrid[0].tick_params('both', direction = 'out', length = 5)\nfor idx_ax, ax in enumerate(grid):\n ax.tick_params('both', direction = 'inout', length = 5)\n ax.set_yticks(np.linspace(1,6,6))\n ax.set_xticks(np.linspace(1,6,6))\n if idx_ax not in [0, 3]:\n ax.yaxis.set_visible(False)\n if idx_ax < 3:\n ax.xaxis.set_visible(False)\nplt.tight_layout(rect = (.01, .01, .94, .99))\nfig.text(\n .5, .99, \n 'Dependently sampled motion, 10$^2$ trials, 10$^4$ runs',\n ha = 'center',\n va = 'top'\n)\nfig.text(\n .01, .5,\n 'Estimated weight rank',\n ha = 'left',\n va = 'center',\n rotation = 90\n)\nfig.text(\n .5, .01,\n 'Simulated weight rank',\n ha = 'center',\n va = 'bottom',\n)\nfig.text(\n .99, .5,\n 'Frequency [%]',\n ha = 'right',\n va = 
'center',\n rotation = -90\n)\nfig.savefig(\n str(\n ROOTPATH /\n 'Export' /\n 'WeightRecovery_D.png'\n ), \n dpi = 600\n)\nplt.close(fig)","_____no_output_____"]]],"string":"[\n [\n [\n \"# Importing necessary libraries\\n#===============================================================================\\nimport matplotlib as mpl\\nmpl.use('qt5agg')\\nmpl.interactive(True)\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\nimport pandas as pd\\nimport seaborn as sbn\\nsbn.set()\\nfrom scipy import stats\\nimport h5py\\nfrom os.path import dirname\\nfrom pathlib import Path\\nimport sys\\nimport mmodel_reversals as mm\",\n \"_____no_output_____\"\n ],\n [\n \"# Setting paths\\n#===============================================================================\\nROOTPATH = Path().cwd()\\n(ROOTPATH / 'Export').mkdir(parents=True, exist_ok=True)\",\n \"_____no_output_____\"\n ],\n [\n \"# Function to compute complex-valued OLS\\n#===============================================================================\\ndef complexGLM(pred, crit):\\n '''\\n Compute regression weights for predicting the criterion variable using predictor arrays\\n In -> pred = predictor array, crit = criterion vector\\n Out -> coefs = regression coefficients/weights\\n '''\\n pred = np.array(pred)\\n crit = np.array(crit)\\n if len(crit.shape) < 2:\\n crit = crit.reshape(-1, 1)\\n if pred.dtype is not np.dtype('complex'):\\n pred = np.exp(pred * 1j)\\n if crit.dtype is not np.dtype('complex'):\\n crit = np.exp(crit * 1j)\\n a, b = [crit.shape[0], pred.shape[0]]\\n if crit.shape[0] != pred.shape[0]:\\n raise ValueError('The two arrays are of incompatible shape, {} and {}'.format(a, b))\\n coefs = np.asmatrix(np.asmatrix(pred).H * np.asmatrix(pred)).I * (np.asmatrix(pred).H * np.asmatrix(crit))\\n return coefs\",\n \"_____no_output_____\"\n ],\n [\n \"# Setting simulation parameters\\n#===============================================================================\\nnp.random.seed(0)\\ntrlN = 1000\\nrunN = 10000\\nsimK = np.sort([.1, 2.5, 1., 5., 10.])\",\n \"_____no_output_____\"\n ],\n [\n \"# Simulate independently sampled motion directions\\n#===============================================================================\\npresDirs_ind = np.angle(\\n np.exp(\\n np.random.uniform(\\n 0, 2 * np.pi, \\n size = [runN, trlN, 6]\\n ) * 1j\\n )\\n)\\n\\npercDirs_ind = np.concatenate([\\n np.angle(\\n np.exp(\\n np.array(\\n [\\n np.random.vonmises(\\n presDirs_ind, K\\n )\\n for K in simK\\n ]\\n ) * 1j\\n )\\n ),\\n # no noise condition, K = inf\\n presDirs_ind[None]\\n])\\n# saving data for independently sampled directions\\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\\n f.create_dataset(\\n name = 'presDirs_ind', \\n data = presDirs_ind, \\n compression = 9\\n )\\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\\n f.create_dataset(\\n name = 'percDirs_ind', \\n data = percDirs_ind, \\n compression = 9\\n )\\npresDirs_ind = None\\npercDirs_ind = None\",\n \"_____no_output_____\"\n ],\n [\n \"# Simulate dependently sampled motion direction\\n#===============================================================================\\nfrstTar, frstFoil = np.random.choice(\\n np.arange(0, 360), \\n size = [2, runN, trlN]\\n)\\nfrstDis, scndTar = (\\n frstTar[None] \\n # random direction (CW/CCW)\\n + np.random.choice(\\n [-1, 1],\\n size = [2, runN, trlN]\\n ) \\n # random angular offset\\n * np.random.choice(\\n np.arange(30, 151),\\n size = [2, runN, trlN]\\n )\\n)\\nscndDis, scndFoil = (\\n 
np.stack(\\n [scndTar, frstFoil]\\n )\\n # random direction (CW/CCW)\\n + np.random.choice(\\n [-1, 1],\\n size = [2, runN, trlN]\\n ) \\n # random angular offset\\n * np.random.choice(\\n np.arange(30, 151),\\n size = [2, runN, trlN]\\n )\\n)\\npresDirs_dep = np.angle(\\n np.exp(\\n np.deg2rad(np.stack(\\n [frstTar, scndTar, frstDis, scndDis, frstFoil, scndFoil],\\n axis = -1\\n )) * 1j\\n )\\n)\\n\\npercDirs_dep = np.concatenate([\\n np.angle(\\n np.exp(\\n np.array(\\n [\\n np.random.vonmises(\\n presDirs_dep, K\\n )\\n for K in simK\\n ]\\n ) * 1j\\n )\\n ),\\n # no noise condition, K = inf\\n presDirs_dep[None]\\n])\\n\\n# saving data for dependently sampled directions\\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\\n f.create_dataset(\\n name = 'presDirs_dep', \\n data = presDirs_dep, \\n compression = 9\\n )\\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\\n f.create_dataset(\\n name = 'percDirs_dep', \\n data = percDirs_dep, \\n compression = 9\\n )\\npresDirs_dep = None\\npercDirs_dep = None\",\n \"_____no_output_____\"\n ],\n [\n \"# Simulate complex-valued regression weights\\n#===============================================================================\\nsimCoefAbs = np.random.uniform(size = [runN, 6])\\n# the angles of weigthing coeficients\\nsimCoefAng = np.random.uniform(\\n 0, 2 * np.pi,\\n size = [runN, 6]\\n)\\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'a') as f:\\n f.create_dataset(\\n name = 'coefsAbs', \\n data = simCoefAbs, \\n compression = 9\\n )\\n f.create_dataset(\\n name = 'coefsAng', \\n data = simCoefAng, \\n compression = 9\\n )\\nsimCoefAbs = None\\nsimCoefAng = None\",\n \"_____no_output_____\"\n ],\n [\n \"# Run complex-valued OLS for different simulation conditions\\n#===============================================================================\\nfor cond in ['ind', 'dep', 'dep_ss']:\\n # there are three conditions:\\n # ind: independently sampled motion\\n # dep: dependently sampled motion\\n # dep_ss: dependently sampled motion, 100 trials per run\\n print('Analysing {} simulation condition'.format(cond.upper()))\\n ssize = None\\n cond_raw = cond\\n if 'ss' in cond.split('_'):\\n cond, ssize = cond.split('_')\\n with h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'r') as f:\\n presDirs = f['presDirs_{}'.format(cond)][:]\\n percDirs = f['percDirs_{}'.format(cond)][:]\\n coefsAbs = f['coefsAbs'][:]\\n coefsAng = f['coefsAng'][:]\\n if ssize:\\n presDirs = presDirs[:, :100]\\n percDirs = percDirs[:, :, :100]\\n\\n # running complex-values OLS for different simulated weight angles\\n for idx_simAngle, simAngle in enumerate(['null', 'real']):\\n # two analyses are run\\n # null: the angles of the simulated complex-valued regression weights are zero\\n # real: the angles are are randomly sampled \\n simCoefs = (\\n np.exp(\\n [0, 1][idx_simAngle] * coefsAng * 1j\\n ) * coefsAbs\\n ) \\n # %% simulating response on the basis of perceived directions and simulated\\n respDirs = np.array([\\n np.angle(\\n np.sum(\\n simCoefs[:, None] \\n * np.exp(simKappa * 1j), \\n -1))\\n for simKappa in percDirs\\n ])\\n # weighting coefficients\\n coefs = np.array(\\n [\\n [\\n complexGLM(presDirs[idxRun], run)\\n for idxRun, run in enumerate(simKappa)\\n ]\\n for simKappa in respDirs\\n ] \\n ).squeeze()\\n print('Finished complex OLS')\\n # %% goodness of fit\\n predDirs = np.array([\\n np.angle(\\n np.sum(\\n simKappa[:, None, :] \\n * np.exp(presDirs * 1j), -1\\n )\\n )\\n for simKappa in coefs\\n ])\\n GoF = 
np.array([\\n np.angle(\\n np.exp(respDirs[simKappa] * 1j)\\n / np.exp(predDirs[simKappa] * 1j)\\n )\\n for simKappa in range(coefs.shape[0])\\n ])\\n # saving data\\n with h5py.File(ROOTPATH / 'Export' / 'simCoefs.hdf', 'a') as f:\\n f.create_dataset(\\n name = 'coefsAbsHat_{}_{}'.format(cond_raw,simAngle), \\n data = np.abs(coefs), \\n compression = 9\\n )\\n f.create_dataset(\\n name = 'coefsAngHat_{}_{}'.format(cond_raw,simAngle), \\n data = np.angle(coefs), \\n compression = 9\\n )\\n f.create_dataset(\\n name = 'GoF_{}_{}'.format(cond_raw,simAngle), \\n data = GoF, \\n compression = 9\\n )\",\n \"_____no_output_____\"\n ],\n [\n \"# Setting parameters for plotting supplementary figure 1\\n#===============================================================================\\n# two different plottings can be performed\\n# first, the results for simulated complex-valued weights using real angles\\n# second, the results for simulated weights using zero angles\\n# here, only the real values are plotted.\\n# N.B., the results for zero angles yields similart goodness-of-fit\\n# N.B., the ability of the complex-valued OLS to recover the angles (not plotted)\\n# is similar to its ability to recover the lengths, i.e., the decision weights .\\nconds = [\\n 'GoF_ind_real',\\n 'GoF_dep_real',\\n 'GoF_dep_ss_real'\\n]\\nwith h5py.File(ROOTPATH / 'Export' / 'simCoefs.hdf', 'r') as f:\\n GoF = dict([(cond, f[cond][:]) for cond in conds])\",\n \"_____no_output_____\"\n ],\n [\n \"# Plotting supplementary figure 1\\n#===============================================================================\\nsbn.set_style('ticks')\\nSSIZE = 8\\nMSIZE = 10\\nLSIZE = 12\\nparams = {'lines.linewidth' : 1.5,\\n 'grid.linewidth' : 1,\\n 'xtick.labelsize' : MSIZE,\\n 'ytick.labelsize' : MSIZE,\\n 'xtick.major.width' : 1,\\n 'ytick.major.width' : 1,\\n 'xtick.major.size' : 5,\\n 'ytick.major.size' : 5,\\n 'xtick.direction' : 'inout',\\n 'ytick.direction' :'inout',\\n 'axes.linewidth': 1,\\n 'axes.labelsize' : MSIZE,\\n 'axes.titlesize' : MSIZE,\\n 'figure.titlesize' : LSIZE,\\n 'font.size' : MSIZE,\\n 'savefig.dpi': 300,\\n 'font.sans-serif' : ['Calibri'],\\n 'legend.fontsize' : MSIZE,\\n 'hatch.linewidth' : .2}\\nsbn.mpl.rcParams.update(params)\\ncols = sbn.husl_palette(6, h = .15, s = .75, l = .5)\\nsimK = np.sort([.1, 2.5, 1., 5., 10.])\\nsimNoise = np.random.vonmises(0, simK[:, None], [5, 100000])\\nfig = plt.figure(figsize = (8,2.8))\\nax = fig.add_subplot(1, 4, 1)\\nfor idx_noise, noise in enumerate(simNoise):\\n sbn.kdeplot(\\n noise,\\n color = cols[idx_noise],\\n alpha = .8,\\n lw = 2,\\n label = simK[idx_noise],\\n ax = ax\\n )\\nax.axvline(0, color = cols[-1], alpha = .8, lw = 2, label = 'No noise')\\nfor idx_cond, cond in enumerate(conds):\\n ax = fig.add_subplot(1,4,2 + idx_cond)\\n for idxK, err in enumerate(GoF[cond]):\\n sbn.kdeplot(\\n err.flatten(),\\n color = cols[idxK],\\n alpha = .8,\\n lw = 2,\\n label = '{}$\\\\degree$'.format(\\n np.rad2deg(mm.cstd(err.flatten())).astype('int')\\n ),\\n ax = ax\\n )\\nfor idx_ax, ax in enumerate(fig.axes):\\n title = '$\\\\kappa$'\\n xlab = 'Perceptual noise'\\n if idx_ax:\\n title = '$\\\\sigma$'\\n xlab = 'Prediction error'\\n ax.legend(\\n title = title, \\n frameon = False,\\n handlelength = 1,\\n handletextpad = .5,\\n markerfirst = False\\n )\\n ax.set_ylim(-0.05, 7)\\n ax.set_xlim(-np.pi*1.1, np.pi*1.1)\\n ax.set_xticks([-np.pi, 0, np.pi])\\n ax.set_xticklabels(['-$\\\\pi$', '0', '$\\\\pi$'])\\n ax.set_yticks([])\\n ax.set_xlabel(xlab)\\n 
ax.set_ylabel('Probability density')\\n sbn.despine(ax = ax)\\n ax.spines['bottom'].set_bounds(-np.pi, np.pi)\\n ax.spines['left'].set_visible(False)\\n if idx_ax:\\n ax.yaxis.set_visible(False)\\nplt.tight_layout(rect = (0, 0, 1, 1))\\nfig.savefig(\\n str(ROOTPATH / 'Export'/ 'GoodnessOfFit_All.png'), \\n dpi = 600\\n)\\nplt.close(fig)\",\n \"_____no_output_____\"\n ],\n [\n \"# Setting parameters for plotting supplementary figure 2\\n#===============================================================================\\nconds = [\\n 'ind_real',\\n 'dep_real',\\n 'dep_ss_real'\\n]\\nwith h5py.File(ROOTPATH / 'Export' / 'simData.hdf', 'r') as f:\\n coefsAbs = f['coefsAbs'][:]\\ncols = sbn.husl_palette(6, h = .15, s = .75, l = .5)\",\n \"_____no_output_____\"\n ],\n [\n \"# Plotting panels A-C of supplementary figure 2\\n#===============================================================================\\nfor idx_cond, cond in enumerate(conds):\\n fig = plt.figure(figsize = (4,2.8))\\n with h5py.File(ROOTPATH / 'Export' / 'simCoefs.hdf', 'r') as f:\\n coefsAbsHat = f['_'.join(['coefsAbsHat', cond])][:]\\n for idxK, weights in enumerate(coefsAbsHat):\\n ax = fig.add_subplot(2, 3, idxK + 1)\\n scatter = ax.plot(\\n coefsAbs.flatten(), \\n weights.flatten(), \\n '.',\\n mec = (.9,.9,.9),\\n mfc = 'none',\\n zorder = -10\\n )\\n line = ax.plot(\\n np.array([0, 1]), np.array([0, 1]), \\n 'k--',\\n lw = 1,\\n zorder = 0\\n )\\n bins = pd.qcut(coefsAbs.flatten(), 4).codes\\n dataset = [weights.flatten()[bins == bin] for bin in np.unique(bins)]\\n vlnplt = ax.violinplot(\\n dataset, \\n positions = [.125, .375, .625, .875],\\n showextrema = False,\\n showmedians = True,\\n widths = .15,\\n ) \\n for i in vlnplt['bodies']:\\n i.set_alpha(.8)\\n i.set_facecolor(cols[idxK])\\n i.set_lw(0)\\n vlnplt['cmedians'].set_edgecolor('white')\\n vlnplt['cmedians'].set_lw(.5)\\n ax.text(\\n .05, .95,\\n (\\n ['$\\\\kappa$ = {}'.format(k) for k in simK] \\n + ['No noise']\\n )[idxK],\\n transform = ax.transAxes,\\n va = 'top'\\n )\\n ax.set_xlabel('Simulated weights')\\n ax.set_ylabel('Estimated weights')\\n for idx_ax, ax in enumerate(fig.axes):\\n ax.tick_params('both', direction = 'out')\\n ax.set_xlim(-.1, 1.1)\\n ax.set_ylim(-.1, 1.1)\\n ax.spines['bottom'].set_bounds(0,1)\\n ax.spines['left'].set_bounds(0,1)\\n ax.spines['top'].set_visible(False)\\n ax.spines['right'].set_visible(False)\\n ax.set_xticks(np.linspace(0, 1, 3))\\n ax.set_yticks(np.linspace(0, 1, 3))\\n if idx_ax not in [0, 3]:\\n ax.yaxis.set_visible(False)\\n ax.spines['left'].set_visible(False)\\n if idx_ax not in [3, 4, 5]:\\n ax.xaxis.set_visible(False)\\n ax.spines['bottom'].set_visible(False)\\n plt.tight_layout(rect = (0, 0, 1, .975))\\n label = [\\n 'Independently sampled motion, 10$^3$ trials, 10$^4$ runs',\\n 'Dependently sampled motion, 10$^3$ trials, 10$^4$ runs',\\n 'Dependently sampled motion, 10$^2$ trials, 10$^4$ runs'\\n ][idx_cond]\\n fig.text(\\n .5, 1, \\n label,\\n ha = 'center',\\n va = 'top'\\n )\\n fig.savefig(\\n str(\\n ROOTPATH / \\n 'Export' / \\n 'WeightRecovery_{}.png'\\n ).format([\\n 'A', 'B', 'C'\\n ][idx_cond]),\\n dpi = 600\\n )\\n plt.close(fig)\",\n \"_____no_output_____\"\n ],\n [\n \"# Plotting panel D of supplementary figure 2\\n#===============================================================================\\nfrom mpl_toolkits.axes_grid1 import ImageGrid\\ncols = sbn.husl_palette(6, h = .15, s = .75, l = .5)\\nfig = plt.figure(figsize = (4,2.8))\\ngrid = ImageGrid(\\n fig, 111, nrows_ncols = (2, 3), \\n 
share_all = True, cbar_mode= 'single', aspect= True\\n)\\nfor idxK, weights in enumerate(coefsAbsHat):\\n ax = grid[idxK]\\n heatmap, xedges, yedges = np.histogram2d(\\n np.array(list(map(\\n stats.rankdata,\\n coefsAbs\\n ))).flatten(), \\n np.array(list(map(\\n stats.rankdata,\\n weights\\n ))).flatten(),\\n bins = np.linspace(.5, 6.5, 7)\\n )\\n heatmap /= heatmap.sum()\\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\\n im = ax.imshow(\\n heatmap, \\n extent = extent, origin = 'lower', \\n vmin = 0, vmax = .15,\\n cmap = 'viridis'\\n )\\n ax.text(\\n .05, .95,\\n (\\n ['$\\\\kappa$ = {}'.format(k) for k in simK] \\n + ['No noise']\\n )[idxK],\\n transform = ax.transAxes,\\n va = 'top',\\n color = 'white'\\n )\\ngrid.cbar_axes[0].colorbar(im)\\ngrid.cbar_axes[0].set_ylim(0, .14)\\ngrid.cbar_axes[0].set_yticks([.0, .05, .10, .15])\\ngrid.cbar_axes[0].set_yticklabels(['0','5','10', '15'])\\ngrid.cbar_axes[0].tick_params(direction = 'inout', length = 5)\\ngrid[0].tick_params('both', direction = 'out', length = 5)\\nfor idx_ax, ax in enumerate(grid):\\n ax.tick_params('both', direction = 'inout', length = 5)\\n ax.set_yticks(np.linspace(1,6,6))\\n ax.set_xticks(np.linspace(1,6,6))\\n if idx_ax not in [0, 3]:\\n ax.yaxis.set_visible(False)\\n if idx_ax < 3:\\n ax.xaxis.set_visible(False)\\nplt.tight_layout(rect = (.01, .01, .94, .99))\\nfig.text(\\n .5, .99, \\n 'Dependently sampled motion, 10$^2$ trials, 10$^4$ runs',\\n ha = 'center',\\n va = 'top'\\n)\\nfig.text(\\n .01, .5,\\n 'Estimated weight rank',\\n ha = 'left',\\n va = 'center',\\n rotation = 90\\n)\\nfig.text(\\n .5, .01,\\n 'Simulated weight rank',\\n ha = 'center',\\n va = 'bottom',\\n)\\nfig.text(\\n .99, .5,\\n 'Frequency [%]',\\n ha = 'right',\\n va = 'center',\\n rotation = -90\\n)\\nfig.savefig(\\n str(\\n ROOTPATH /\\n 'Export' /\\n 'WeightRecovery_D.png'\\n ), \\n dpi = 600\\n)\\nplt.close(fig)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459041,"cells":{"hexsha":{"kind":"string","value":"e7ee8dc6e96546ab0448ba8d1f03c210c495240d"},"size":{"kind":"number","value":10344,"string":"10,344"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"jupyter-notebook/eda_import_exports.ipynb"},"max_stars_repo_name":{"kind":"string","value":"NanceCA/binational-trade-volumes"},"max_stars_repo_head_hexsha":{"kind":"string","value":"61b367ae5843d542b018ef0ac8e66ebbefb9804f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-04-05T05:11:35.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-07-04T07:05:03.000Z"},"max_issues_repo_path":{"kind":"string","value":"jupyter-notebook/eda_import_exports.ipynb"},"max_issues_repo_name":{"kind":"string","value":"NanceCA/binational-trade-volumes"},"max_issues_repo_head_hexsha":{"kind":"string","value":"61b367ae5843d542b018ef0ac8e66ebbefb9804f"},"max_issues_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-05-11T07:28:52.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-05-11T07:28:52.000Z"},"max_forks_repo_path":{"kind":"string","value":"jupyter-notebook/eda_import_exports.ipynb"},"max_forks_repo_name":{"kind":"string","value":"NanceCA/binational-trade-volumes"},"max_forks_repo_head_hexsha":{"kind":"string","value":"61b367ae5843d542b018ef0ac8e66ebbefb9804f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-07-04T07:05:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-07-04T07:05:04.000Z"},"avg_line_length":{"kind":"number","value":21.8227848101,"string":"21.822785"},"max_line_length":{"kind":"number","value":95,"string":"95"},"alphanum_fraction":{"kind":"number","value":0.4651972158,"string":"0.465197"},"cells":{"kind":"list like","value":[[["## EDA for Import and Export Trade Volumes","_____no_output_____"],["### Binational trade relationship between Mexico and the United States","_____no_output_____"]],[["#import key libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline","_____no_output_____"]],[["## Dataset 1: General Imports from Mexico to the United States","_____no_output_____"]],[["imports = pd.read_csv(\"./data/usitc/total-imports-mx2us.csv\")","_____no_output_____"],["## data to be read includes the customs value of the import and the year\nimports.shape","_____no_output_____"],["imports.head()\n#note that the customs_value and the dollar_amount are the same just different data types","_____no_output_____"],["list(imports.columns)","_____no_output_____"],["imports['imports'].describe()","_____no_output_____"],["imports['dollar_amount'].describe()","_____no_output_____"],["imports['customs_value'].plot(kind=\"bar\")\n## confirming that the data is linear","_____no_output_____"],["plt.scatter(imports[\"year\"],imports['customs_value'],color=\"blue\")\nplt.title('Imports from Mexico to the US, Annual')\nplt.xlabel('year')\nplt.ylabel('customs value e11')\nplt.show()\n##amazing! 
Looks pretty linear to me","_____no_output_____"]],[["## Dataset #2 Exports from US to Mexico","_____no_output_____"]],[["exports = pd.read_csv(\"./data/usitc/total-exports-us2mx.csv\")","_____no_output_____"],["exports.shape","_____no_output_____"],["exports.head()","_____no_output_____"],["list(exports.columns)","_____no_output_____"],["exports['exports'].describe()","_____no_output_____"],["plt.scatter(exports[\"year\"],exports['exports'],color=\"green\")\nplt.title('Exports from US to Mexico, Annual')\nplt.xlabel('year')\nplt.ylabel('FAS Value e11')\nplt.show()\n##generally pretty linear","_____no_output_____"],["## Combining both exports and imports","_____no_output_____"],["##combine both vectors on one graph\nplt.plot(exports[\"year\"],exports['exports'],color=\"green\")\nplt.scatter(imports[\"year\"],imports['imports'],color=\"blue\")\nplt.title(\"Plotting imports and exports\")\nplt.xlabel(\"Year\")\nplt.ylabel(\"Value\")\nplt.legend()\nplt.show()","_____no_output_____"]],[["## Data preprocessing","_____no_output_____"]],[["# imports\nyear_var = list(imports['year'])\nprint(year_var)","_____no_output_____"],["dollar = list(imports[\"dollar_amount\"])\nprint(dollar)","_____no_output_____"],["def pre_process(year, dollar):\n print(\"[\",year,\",\",dollar,\"]\",\",\")","_____no_output_____"],["pre_process(1996, 2)","_____no_output_____"]],[["## Running descriptive statistics","_____no_output_____"]],[["# Pulling in descriptive statistics on IMPORTS\nfrom scipy import stats\nstats.describe(ytrain_pred)","_____no_output_____"],["imports['imports'].describe()","_____no_output_____"],["exports[\"exports\"].describe()","_____no_output_____"]]],"string":"[\n [\n [\n \"## EDA for Import and Export Trade Volumes\",\n \"_____no_output_____\"\n ],\n [\n \"### Binational trade relationship between Mexico and the United States\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#import key libraries\\nimport pandas as pd\\nimport matplotlib.pyplot as plt\\n%matplotlib inline\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Dataset 1: General Imports from Mexico to the United States\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"imports = pd.read_csv(\\\"./data/usitc/total-imports-mx2us.csv\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"## data to be read includes the customs value of the import and the year\\nimports.shape\",\n \"_____no_output_____\"\n ],\n [\n \"imports.head()\\n#note that the customs_value and the dollar_amount are the same just different data types\",\n \"_____no_output_____\"\n ],\n [\n \"list(imports.columns)\",\n \"_____no_output_____\"\n ],\n [\n \"imports['imports'].describe()\",\n \"_____no_output_____\"\n ],\n [\n \"imports['dollar_amount'].describe()\",\n \"_____no_output_____\"\n ],\n [\n \"imports['customs_value'].plot(kind=\\\"bar\\\")\\n## confirming that the data is linear\",\n \"_____no_output_____\"\n ],\n [\n \"plt.scatter(imports[\\\"year\\\"],imports['customs_value'],color=\\\"blue\\\")\\nplt.title('Imports from Mexico to the US, Annual')\\nplt.xlabel('year')\\nplt.ylabel('customs value e11')\\nplt.show()\\n##amazing! 
Looks pretty linear to me\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Dataset #2 Exports from US to Mexico\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"exports = pd.read_csv(\\\"./data/usitc/total-exports-us2mx.csv\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"exports.shape\",\n \"_____no_output_____\"\n ],\n [\n \"exports.head()\",\n \"_____no_output_____\"\n ],\n [\n \"list(exports.columns)\",\n \"_____no_output_____\"\n ],\n [\n \"exports['exports'].describe()\",\n \"_____no_output_____\"\n ],\n [\n \"plt.scatter(exports[\\\"year\\\"],exports['exports'],color=\\\"green\\\")\\nplt.title('Exports from US to Mexico, Annual')\\nplt.xlabel('year')\\nplt.ylabel('FAS Value e11')\\nplt.show()\\n##generally pretty linear\",\n \"_____no_output_____\"\n ],\n [\n \"## Combining both exports and imports\",\n \"_____no_output_____\"\n ],\n [\n \"##combine both vectors on one graph\\nplt.plot(exports[\\\"year\\\"],exports['exports'],color=\\\"green\\\")\\nplt.scatter(imports[\\\"year\\\"],imports['imports'],color=\\\"blue\\\")\\nplt.title(\\\"Plotting imports and exports\\\")\\nplt.xlabel(\\\"Year\\\")\\nplt.ylabel(\\\"Value\\\")\\nplt.legend()\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Data preprocessing\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# imports\\nyear_var = list(imports['year'])\\nprint(year_var)\",\n \"_____no_output_____\"\n ],\n [\n \"dollar = list(imports[\\\"dollar_amount\\\"])\\nprint(dollar)\",\n \"_____no_output_____\"\n ],\n [\n \"def pre_process(year, dollar):\\n print(\\\"[\\\",year,\\\",\\\",dollar,\\\"]\\\",\\\",\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"pre_process(1996, 2)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Running descriptive statistics\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Pulling in descriptive statistics on IMPORTS\\nfrom scipy import stats\\nstats.describe(ytrain_pred)\",\n \"_____no_output_____\"\n ],\n [\n \"imports['imports'].describe()\",\n \"_____no_output_____\"\n ],\n [\n \"exports[\\\"exports\\\"].describe()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code"],["markdown"],["code","code","code"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459042,"cells":{"hexsha":{"kind":"string","value":"e7ee9115cffb2943d409592f71570a25853a3bdf"},"size":{"kind":"number","value":34006,"string":"34,006"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Cauchy 
Distribution.ipynb"},"max_stars_repo_name":{"kind":"string","value":"vikasgorur/all-of-stats"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6e3f47238537b691a7edca14f124abcabed4ac7d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Cauchy Distribution.ipynb"},"max_issues_repo_name":{"kind":"string","value":"vikasgorur/all-of-stats"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6e3f47238537b691a7edca14f124abcabed4ac7d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Cauchy Distribution.ipynb"},"max_forks_repo_name":{"kind":"string","value":"vikasgorur/all-of-stats"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6e3f47238537b691a7edca14f124abcabed4ac7d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":213.8742138365,"string":"213.874214"},"max_line_length":{"kind":"number","value":30314,"string":"30,314"},"alphanum_fraction":{"kind":"number","value":0.9208080927,"string":"0.920808"},"cells":{"kind":"list like","value":[[["empty"]]],"string":"[\n [\n [\n \"empty\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["empty"],"string":"[\n \"empty\"\n]"},"cell_type_groups":{"kind":"list like","value":[["empty"]],"string":"[\n [\n \"empty\"\n ]\n]"}}},{"rowIdx":1459043,"cells":{"hexsha":{"kind":"string","value":"e7ee9205aad19aeeb559290637f332ae13d3cd84"},"size":{"kind":"number","value":9189,"string":"9,189"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Explore U.S. Births/Basics.ipynb"},"max_stars_repo_name":{"kind":"string","value":"vipmunot/Data-Science-Projects"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9dcc3b7909074080dad16666f2e1a06ca2f23f86"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":8,"string":"8"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-02-28T01:05:52.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-05-27T13:25:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"Explore U.S. Births/Basics.ipynb"},"max_issues_repo_name":{"kind":"string","value":"vipmunot/Data-Science-Projects"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9dcc3b7909074080dad16666f2e1a06ca2f23f86"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Explore U.S. 
Births/Basics.ipynb"},"max_forks_repo_name":{"kind":"string","value":"vipmunot/Data-Science-Projects"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9dcc3b7909074080dad16666f2e1a06ca2f23f86"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-10-07T19:39:49.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-06-13T08:12:07.000Z"},"avg_line_length":{"kind":"number","value":22.2493946731,"string":"22.249395"},"max_line_length":{"kind":"number","value":80,"string":"80"},"alphanum_fraction":{"kind":"number","value":0.4543475895,"string":"0.454348"},"cells":{"kind":"list like","value":[[["## 1: Introduction To The Dataset","_____no_output_____"]],[["data = open('US_births_1994-2003_CDC_NCHS.csv','r').read().split('\\n')\ndata[:10]","_____no_output_____"]],[["## 2: Converting Data Into A List Of Lists","_____no_output_____"]],[["def read_csv(filename,header = False):\n final_list = []\n read_data = open(filename,'r').read().split('\\n')[1:]\n if header == True:\n read_data = open(filename,'r').read().split('\\n')[1:]\n else:\n read_data = open(filename,'r').read().split('\\n')\n for item in read_data:\n int_fields = []\n string_fields = item.split(',')\n for val in string_fields:\n int_fields.append(int(val))\n final_list.append(int_fields) \n return(final_list) \ncdc_list = read_csv('US_births_1994-2003_CDC_NCHS.csv',header = True)\ncdc_list[:10]","_____no_output_____"]],[["## 3: Calculating Number Of Births Each Month","_____no_output_____"]],[["def month_births(data):\n births_per_month = {}\n for item in data:\n if item[1] in births_per_month.keys():\n births_per_month[item[1]] += item[4]\n else:\n births_per_month[item[1]] = item[4]\n return(births_per_month)\ncdc_month_births = month_births(cdc_list) \ncdc_month_births","_____no_output_____"],["def dow_births(data):\n births_per_dow = {}\n for item in data:\n if item[3] in births_per_dow.keys():\n births_per_dow[item[3]] += item[4]\n else:\n births_per_dow[item[3]] = item[4]\n return(births_per_dow)\ncdc_day_births = dow_births(cdc_list) \ncdc_day_births","_____no_output_____"]],[["## 5: Creating A More General Function","_____no_output_____"]],[["def calc_counts(data,column):\n birth = {}\n for item in data:\n if item[column] in birth.keys():\n birth[item[column]] += item[4]\n else:\n birth[item[column]] = item[4]\n return(birth)\ncdc_year_births = calc_counts(cdc_list, 0)\ncdc_month_births = calc_counts(cdc_list, 1)\ncdc_dom_births = calc_counts(cdc_list, 2)\ncdc_dow_births = calc_counts(cdc_list, 3)","_____no_output_____"],["cdc_year_births","_____no_output_____"],["cdc_month_births","_____no_output_____"],["cdc_dom_births","_____no_output_____"],["cdc_dow_births","_____no_output_____"],["def min_max(dictionary):\n min_val = min(dictionary.items(), key=lambda k: k[1])\n max_val = max(dictionary.items(), key=lambda k: k[1])\n return(\"Minimum Value:%s Maximum Value:%s\"%(min_val,max_val))\nmin_max(cdc_dow_births)","_____no_output_____"]]],"string":"[\n [\n [\n \"## 1: Introduction To The Dataset\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"data = open('US_births_1994-2003_CDC_NCHS.csv','r').read().split('\\\\n')\\ndata[:10]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 2: Converting Data Into A List Of Lists\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def read_csv(filename,header = False):\\n final_list = []\\n 
read_data = open(filename,'r').read().split('\\\\n')[1:]\\n if header == True:\\n read_data = open(filename,'r').read().split('\\\\n')[1:]\\n else:\\n read_data = open(filename,'r').read().split('\\\\n')\\n for item in read_data:\\n int_fields = []\\n string_fields = item.split(',')\\n for val in string_fields:\\n int_fields.append(int(val))\\n final_list.append(int_fields) \\n return(final_list) \\ncdc_list = read_csv('US_births_1994-2003_CDC_NCHS.csv',header = True)\\ncdc_list[:10]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 3: Calculating Number Of Births Each Month\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def month_births(data):\\n births_per_month = {}\\n for item in data:\\n if item[1] in births_per_month.keys():\\n births_per_month[item[1]] += item[4]\\n else:\\n births_per_month[item[1]] = item[4]\\n return(births_per_month)\\ncdc_month_births = month_births(cdc_list) \\ncdc_month_births\",\n \"_____no_output_____\"\n ],\n [\n \"def dow_births(data):\\n births_per_dow = {}\\n for item in data:\\n if item[3] in births_per_dow.keys():\\n births_per_dow[item[3]] += item[4]\\n else:\\n births_per_dow[item[3]] = item[4]\\n return(births_per_dow)\\ncdc_day_births = dow_births(cdc_list) \\ncdc_day_births\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 5: Creating A More General Function\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def calc_counts(data,column):\\n birth = {}\\n for item in data:\\n if item[column] in birth.keys():\\n birth[item[column]] += item[4]\\n else:\\n birth[item[column]] = item[4]\\n return(birth)\\ncdc_year_births = calc_counts(cdc_list, 0)\\ncdc_month_births = calc_counts(cdc_list, 1)\\ncdc_dom_births = calc_counts(cdc_list, 2)\\ncdc_dow_births = calc_counts(cdc_list, 3)\",\n \"_____no_output_____\"\n ],\n [\n \"cdc_year_births\",\n \"_____no_output_____\"\n ],\n [\n \"cdc_month_births\",\n \"_____no_output_____\"\n ],\n [\n \"cdc_dom_births\",\n \"_____no_output_____\"\n ],\n [\n \"cdc_dow_births\",\n \"_____no_output_____\"\n ],\n [\n \"def min_max(dictionary):\\n min_val = min(dictionary.items(), key=lambda k: k[1])\\n max_val = max(dictionary.items(), key=lambda k: k[1])\\n return(\\\"Minimum Value:%s Maximum Value:%s\\\"%(min_val,max_val))\\nmin_max(cdc_dow_births)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459044,"cells":{"hexsha":{"kind":"string","value":"e7eea3a455977f80d6e7cd99813b739c915783b8"},"size":{"kind":"number","value":26449,"string":"26,449"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter 
Notebook"},"max_stars_repo_path":{"kind":"string","value":"examples/condor_dirhash/plot_iris_dataset.ipynb"},"max_stars_repo_name":{"kind":"string","value":"SmartDataInnovationLab/git_batch"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bccaac72d52bd8dcf3a6da947cc0c43ca73dcefb"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"examples/condor_dirhash/plot_iris_dataset.ipynb"},"max_issues_repo_name":{"kind":"string","value":"SmartDataInnovationLab/git_batch"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bccaac72d52bd8dcf3a6da947cc0c43ca73dcefb"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":16,"string":"16"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-03-19T12:33:14.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-08-30T13:02:23.000Z"},"max_forks_repo_path":{"kind":"string","value":"examples/condor_dirhash/plot_iris_dataset.ipynb"},"max_forks_repo_name":{"kind":"string","value":"SmartDataInnovationLab/git_batch"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bccaac72d52bd8dcf3a6da947cc0c43ca73dcefb"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":383.3188405797,"string":"383.318841"},"max_line_length":{"kind":"number","value":25068,"string":"25,068"},"alphanum_fraction":{"kind":"number","value":0.941018564,"string":"0.941019"},"cells":{"kind":"list like","value":[[["import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom csv import reader\nimport numpy\n\niris = numpy.genfromtxt('data/iris.csv', delimiter=',', dtype=\"|U5\")\nX = iris[1:,:2].astype(numpy.float)\ny = numpy.unique(iris[1:,4], return_inverse=True)[1]\n\nplt.scatter(X[:, 0], X[:, 1], c=y)\nplt.xlim(3.5, 8.5)\nplt.ylim(1.5, 5)","_____no_output_____"]]],"string":"[\n [\n [\n \"import matplotlib.pyplot as plt\\nfrom mpl_toolkits.mplot3d import Axes3D\\nfrom csv import reader\\nimport numpy\\n\\niris = numpy.genfromtxt('data/iris.csv', delimiter=',', dtype=\\\"|U5\\\")\\nX = iris[1:,:2].astype(numpy.float)\\ny = numpy.unique(iris[1:,4], return_inverse=True)[1]\\n\\nplt.scatter(X[:, 0], X[:, 1], c=y)\\nplt.xlim(3.5, 8.5)\\nplt.ylim(1.5, 5)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"]],"string":"[\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459045,"cells":{"hexsha":{"kind":"string","value":"e7eeabea31f91bec025e1a82487a0792eaf9f181"},"size":{"kind":"number","value":20115,"string":"20,115"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"veri_final.ipynb"},"max_stars_repo_name":{"kind":"string","value":"JULYEN/kmeans-clustering"},"max_stars_repo_head_hexsha":{"kind":"string","value":"69e097a3ea0d052c060a72d3575a38cae81c2cac"},"max_stars_repo_licenses":{"kind":"list 
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"veri_final.ipynb"},"max_issues_repo_name":{"kind":"string","value":"JULYEN/kmeans-clustering"},"max_issues_repo_head_hexsha":{"kind":"string","value":"69e097a3ea0d052c060a72d3575a38cae81c2cac"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"veri_final.ipynb"},"max_forks_repo_name":{"kind":"string","value":"JULYEN/kmeans-clustering"},"max_forks_repo_head_hexsha":{"kind":"string","value":"69e097a3ea0d052c060a72d3575a38cae81c2cac"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":191.5714285714,"string":"191.571429"},"max_line_length":{"kind":"number","value":17892,"string":"17,892"},"alphanum_fraction":{"kind":"number","value":0.9170270942,"string":"0.917027"},"cells":{"kind":"list like","value":[[["from pandas import DataFrame\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans","_____no_output_____"],["\nData = {'x': [25,34,22,27,23,24,31,22,35,26,28,54,57,43,36,27,29,52,32,47,39,48,35,33,44,45,38,43,41,46],\n 'y': [79,51,53,78,99,92,73,57,69,75,51,32,40,77,53,36,35,58,59,50,25,20,14,12,20,5,29,27,8,7]\n }","_____no_output_____"],["df = DataFrame(Data,columns=['x','y'])","_____no_output_____"],["kmeans = KMeans(n_clusters=2).fit(df)\ncentroids = kmeans.cluster_centers_\nprint(centroids)","[[31.6875 67.125 ]\n [41.35714286 22.14285714]]\n"],["plt.scatter(df['x'], df['y'], c= kmeans.labels_.astype(float), s=50, alpha=0.6)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='blue', s=50)\nplt.title('Kişi Sonuç Değerlendirmesi')\nplt.ylabel('Başarı Puanı')\nplt.xlabel('Yaş ')\nplt.show()","_____no_output_____"]]],"string":"[\n [\n [\n \"from pandas import DataFrame\\nimport matplotlib.pyplot as plt\\nfrom sklearn.cluster import KMeans\",\n \"_____no_output_____\"\n ],\n [\n \"\\nData = {'x': [25,34,22,27,23,24,31,22,35,26,28,54,57,43,36,27,29,52,32,47,39,48,35,33,44,45,38,43,41,46],\\n 'y': [79,51,53,78,99,92,73,57,69,75,51,32,40,77,53,36,35,58,59,50,25,20,14,12,20,5,29,27,8,7]\\n }\",\n \"_____no_output_____\"\n ],\n [\n \"df = DataFrame(Data,columns=['x','y'])\",\n \"_____no_output_____\"\n ],\n [\n \"kmeans = KMeans(n_clusters=2).fit(df)\\ncentroids = kmeans.cluster_centers_\\nprint(centroids)\",\n \"[[31.6875 67.125 ]\\n [41.35714286 22.14285714]]\\n\"\n ],\n [\n \"plt.scatter(df['x'], df['y'], c= kmeans.labels_.astype(float), s=50, alpha=0.6)\\nplt.scatter(centroids[:, 0], centroids[:, 1], c='blue', s=50)\\nplt.title('Kişi Sonuç Değerlendirmesi')\\nplt.ylabel('Başarı Puanı')\\nplt.xlabel('Yaş ')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n 
]\n]"}}},{"rowIdx":1459046,"cells":{"hexsha":{"kind":"string","value":"e7eeb5ba98456184532e96f78f40589931d63f0c"},"size":{"kind":"number","value":5152,"string":"5,152"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"ipynb/make_permissive_clusters.ipynb"},"max_stars_repo_name":{"kind":"string","value":"pdhsulab/GeneGraphDB"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f2754a596d08680f7f0b092391653d9dbf477ddd"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-08-28T07:03:07.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-22T03:34:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"ipynb/make_permissive_clusters.ipynb"},"max_issues_repo_name":{"kind":"string","value":"pdhsulab/GeneGraphDB"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f2754a596d08680f7f0b092391653d9dbf477ddd"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ipynb/make_permissive_clusters.ipynb"},"max_forks_repo_name":{"kind":"string","value":"pdhsulab/GeneGraphDB"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f2754a596d08680f7f0b092391653d9dbf477ddd"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":43.2941176471,"string":"43.294118"},"max_line_length":{"kind":"number","value":1155,"string":"1,155"},"alphanum_fraction":{"kind":"number","value":0.6055900621,"string":"0.60559"},"cells":{"kind":"list like","value":[[["import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom Bio import SeqIO\nimport csv\nimport sqlite3\nimport time","_____no_output_____"],["con=sqlite3.connect(\"80kprotein_stats.db\")\ncur = con.cursor()\ndef get_protein_seq(pid):\n #cmd = \"SELECT * FROM proteins WHERE hashid='%s'\" % pid\n cmd = \"SELECT * FROM proteins WHERE pid = '%s'\" % pid \n #print(cmd)\n cur.execute(cmd)\n return cur.fetchone()[-1]","_____no_output_____"],["path_clu_rep = \"../clusters/clu_rep_stringent_final.csv\"","_____no_output_____"],["outfile = open(\"../clusters/INPUT/permissive/clu_perm_mmseqs_input.faa\", \"w\")\nwith open(path_clu_rep, 'r') as file:\n reader = csv.reader(file)\n next(reader)\n prev_rep = \"\"\n for row in reader:\n stringent_rep = row[0]\n if prev_rep != stringent_rep:\n print(\">\" + stringent_rep, file = outfile)\n print(get_protein_seq(stringent_rep), file = outfile)\n prev_rep = stringent_rep","_____no_output_____"],["con.commit()\ncon.close()","_____no_output_____"]]],"string":"[\n [\n [\n \"import os\\nimport pandas as pd\\nimport matplotlib.pyplot as plt\\nfrom Bio import SeqIO\\nimport csv\\nimport sqlite3\\nimport time\",\n \"_____no_output_____\"\n ],\n [\n \"con=sqlite3.connect(\\\"80kprotein_stats.db\\\")\\ncur = con.cursor()\\ndef get_protein_seq(pid):\\n #cmd = \\\"SELECT * FROM proteins WHERE hashid='%s'\\\" % pid\\n cmd = \\\"SELECT * FROM proteins WHERE pid = '%s'\\\" % pid \\n 
#print(cmd)\\n cur.execute(cmd)\\n return cur.fetchone()[-1]\",\n \"_____no_output_____\"\n ],\n [\n \"path_clu_rep = \\\"../clusters/clu_rep_stringent_final.csv\\\"\",\n \"_____no_output_____\"\n ],\n [\n \"outfile = open(\\\"../clusters/INPUT/permissive/clu_perm_mmseqs_input.faa\\\", \\\"w\\\")\\nwith open(path_clu_rep, 'r') as file:\\n reader = csv.reader(file)\\n next(reader)\\n prev_rep = \\\"\\\"\\n for row in reader:\\n stringent_rep = row[0]\\n if prev_rep != stringent_rep:\\n print(\\\">\\\" + stringent_rep, file = outfile)\\n print(get_protein_seq(stringent_rep), file = outfile)\\n prev_rep = stringent_rep\",\n \"_____no_output_____\"\n ],\n [\n \"con.commit()\\ncon.close()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459047,"cells":{"hexsha":{"kind":"string","value":"e7eed1c6f168719b4fd256dc918aeb6441f0d099"},"size":{"kind":"number","value":21387,"string":"21,387"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb"},"max_stars_repo_name":{"kind":"string","value":"amitkrishna/IBM-DataScience"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4135a3acc768bda78ca8f999c61de23954a5330e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb"},"max_issues_repo_name":{"kind":"string","value":"amitkrishna/IBM-DataScience"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4135a3acc768bda78ca8f999c61de23954a5330e"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Python for AI and DataScience/PY0101EN-3-4-Classes.ipynb"},"max_forks_repo_name":{"kind":"string","value":"amitkrishna/IBM-DataScience"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4135a3acc768bda78ca8f999c61de23954a5330e"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":27.8839634941,"string":"27.883963"},"max_line_length":{"kind":"number","value":716,"string":"716"},"alphanum_fraction":{"kind":"number","value":0.5769860195,"string":"0.576986"},"cells":{"kind":"list like","value":[[["\n \n","_____no_output_____"],["

Classes and Objects in Python

","_____no_output_____"],["

\n Welcome! \n Objects in programming are like objects in real life. Like life, there are different classes of objects. In this notebook, we will create two classes called Circle and Rectangle. By the end of this notebook, you will have a better idea about :\n

    \n
  • what a class is
  • what an attribute is
  • what a method is
\n\n Don’t worry if you don’t get it the first time, as much of the terminology is confusing. Don’t forget to do the practice tests in the notebook.\n

","_____no_output_____"],["
\n \n \n \n
","_____no_output_____"],["

Table of Contents

\n\n\n
","_____no_output_____"],["

Introduction to Classes and Objects

","_____no_output_____"],["

Creating a Class

","_____no_output_____"],["The first part of creating a class is giving it a name: In this notebook, we will create two classes, Circle and Rectangle. We need to determine all the data that make up that class, and we call that an attribute. Think about this step as creating a blue print that we will use to create objects. In figure 1 we see two classes, circle and rectangle. Each has their attributes, they are variables. The class circle has the attribute radius and color, while the rectangle has the attribute height and width. Let’s use the visual examples of these shapes before we get to the code, as this will help you get accustomed to the vocabulary.","_____no_output_____"],["","_____no_output_____"],["Figure 1: Classes circle and rectangle, and each has their own attributes. The class circle has the attribute radius and colour, the rectangle has the attribute height and width.\n","_____no_output_____"],["

Instances of a Class: Objects and Attributes

","_____no_output_____"],["An instance of an object is the realisation of a class, and in Figure 2 we see three instances of the class circle. We give each object a name: red circle, yellow circle and green circle. Each object has different attributes, so let's focus on the attribute of colour for each object.","_____no_output_____"],["","_____no_output_____"],["Figure 2: Three instances of the class circle or three objects of type circle.","_____no_output_____"],[" The colour attribute for the red circle is the colour red, for the green circle object the colour attribute is green, and for the yellow circle the colour attribute is yellow. \n","_____no_output_____"],["

Methods

","_____no_output_____"],["Methods give you a way to change or interact with the object; they are functions that interact with objects. For example, let’s say we would like to increase the radius by a specified amount of a circle. We can create a method called **add_radius(r)** that increases the radius by **r**. This is shown in figure 3, where after applying the method to the \"orange circle object\", the radius of the object increases accordingly. The “dot” notation means to apply the method to the object, which is essentially applying a function to the information in the object.","_____no_output_____"],[" ","_____no_output_____"],["Figure 3: Applying the method “add_radius” to the object orange circle object.","_____no_output_____"],["
","_____no_output_____"],["

Creating a Class

","_____no_output_____"],["Now we are going to create a class circle, but first, we are going to import a library to draw the objects: ","_____no_output_____"]],[["# Import the library\n\nimport matplotlib.pyplot as plt\n%matplotlib inline ","_____no_output_____"]],[[" The first step in creating your own class is to use the class keyword, then the name of the class as shown in Figure 4. In this course the class parent will always be object: ","_____no_output_____"],["","_____no_output_____"],["Figure 4: Three instances of the class circle or three objects of type circle.","_____no_output_____"],["The next step is a special method called a constructor &#95;&#95;init&#95;&#95;, which is used to initialize the object. The input are data attributes. The term self contains all the attributes in the set. For example the self.color gives the value of the attribute color and self.radius will give you the radius of the object. We also have the method add_radius() with the parameter r, the method adds the value of r to the attribute radius. To access the radius we use the syntax self.radius. The labeled syntax is summarized in Figure 5:","_____no_output_____"],["","_____no_output_____"],["Figure 5: Labeled syntax of the object circle.","_____no_output_____"],["The actual object is shown below. We include the method drawCircle to display the image of a circle. We set the default radius to 3 and the default colour to blue:","_____no_output_____"]],[["# Create a class Circle\n\nclass Circle(object):\n \n # Constructor\n def __init__(self, radius=3, color='blue'):\n self.radius = radius\n self.color = color \n \n # Method\n def add_radius(self, r):\n self.radius = self.radius + r\n return(self.radius)\n \n # Method\n def drawCircle(self):\n plt.gca().add_patch(plt.Circle((0, 0), radius=self.radius, fc=self.color))\n plt.axis('scaled')\n plt.show() ","_____no_output_____"]],[["
","_____no_output_____"],["

Creating an instance of a class Circle

","_____no_output_____"],["Let’s create the object RedCircle of type Circle to do the following:","_____no_output_____"]],[["# Create an object RedCircle\n\nRedCircle = Circle(10, 'red')","_____no_output_____"]],[["We can use the dir command to get a list of the object's methods. Many of them are default Python methods.","_____no_output_____"]],[["# Find out the methods can be used on the object RedCircle\n\ndir(RedCircle)","_____no_output_____"]],[["We can look at the data attributes of the object: ","_____no_output_____"]],[["# Print the object attribute radius\n\nRedCircle.radius","_____no_output_____"],["# Print the object attribute color\n\nRedCircle.color","_____no_output_____"]],[[" We can change the object's data attributes: ","_____no_output_____"]],[["# Set the object attribute radius\n\nRedCircle.radius = 1\nRedCircle.radius","_____no_output_____"]],[[" We can draw the object by using the method drawCircle():","_____no_output_____"]],[["# Call the method drawCircle\n\nRedCircle.drawCircle()","_____no_output_____"]],[["We can increase the radius of the circle by applying the method add_radius(). Let increases the radius by 2 and then by 5: ","_____no_output_____"]],[["# Use method to change the object attribute radius\n\nprint('Radius of object:',RedCircle.radius)\nRedCircle.add_radius(2)\nprint('Radius of object of after applying the method add_radius(2):',RedCircle.radius)\nRedCircle.add_radius(5)\nprint('Radius of object of after applying the method add_radius(5):',RedCircle.radius)","_____no_output_____"]],[[" Let’s create a blue circle. As the default colour is blue, all we have to do is specify what the radius is:","_____no_output_____"]],[["# Create a blue circle with a given radius\n\nBlueCircle = Circle(radius=100)","_____no_output_____"]],[[" As before we can access the attributes of the instance of the class by using the dot notation:","_____no_output_____"]],[["# Print the object attribute radius\n\nBlueCircle.radius","_____no_output_____"],["# Print the object attribute color\n\nBlueCircle.color","_____no_output_____"]],[[" We can draw the object by using the method drawCircle():","_____no_output_____"]],[["# Call the method drawCircle\n\nBlueCircle.drawCircle()","_____no_output_____"]],[["Compare the x and y axis of the figure to the figure for RedCircle; they are different.","_____no_output_____"],["
","_____no_output_____"],["

The Rectangle Class

","_____no_output_____"],["Let's create a class rectangle with the attributes of height, width and color. We will only add the method to draw the rectangle object:","_____no_output_____"]],[["# Create a new Rectangle class for creating a rectangle object\n\nclass Rectangle(object):\n \n # Constructor\n def __init__(self, width=2, height=3, color='r'):\n self.height = height \n self.width = width\n self.color = color\n \n # Method\n def drawRectangle(self):\n plt.gca().add_patch(plt.Rectangle((0, 0), self.width, self.height ,fc=self.color))\n plt.axis('scaled')\n plt.show()","_____no_output_____"]],[["Let’s create the object SkinnyBlueRectangle of type Rectangle. Its width will be 2 and height will be 3, and the color will be blue:","_____no_output_____"]],[["# Create a new object rectangle\n\nSkinnyBlueRectangle = Rectangle(2, 10, 'blue')","_____no_output_____"]],[[" As before we can access the attributes of the instance of the class by using the dot notation:","_____no_output_____"]],[["# Print the object attribute height\n\nSkinnyBlueRectangle.height ","_____no_output_____"],["# Print the object attribute width\n\nSkinnyBlueRectangle.width","_____no_output_____"],["# Print the object attribute color\n\nSkinnyBlueRectangle.color","_____no_output_____"]],[[" We can draw the object:","_____no_output_____"]],[["# Use the drawRectangle method to draw the shape\n\nSkinnyBlueRectangle.drawRectangle()","_____no_output_____"]],[["Let’s create the object FatYellowRectangle of type Rectangle :","_____no_output_____"]],[["# Create a new object rectangle\n\nFatYellowRectangle = Rectangle(20, 5, 'yellow')","_____no_output_____"]],[[" We can access the attributes of the instance of the class by using the dot notation:","_____no_output_____"]],[["# Print the object attribute height\n\nFatYellowRectangle.height ","_____no_output_____"],["# Print the object attribute width\n\nFatYellowRectangle.width","_____no_output_____"],["# Print the object attribute color\n\nFatYellowRectangle.color","_____no_output_____"]],[[" We can draw the object:","_____no_output_____"]],[["# Use the drawRectangle method to draw the shape\n\nFatYellowRectangle.drawRectangle()","_____no_output_____"]],[["
\n

The last exercise!

\n

Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow this article to learn how to share your work.\n


","_____no_output_____"],["
\n

Get IBM Watson Studio free of charge!

\n

\n
","_____no_output_____"],["

About the Authors:

\n

Joseph Santarcangelo is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.

","_____no_output_____"],["Other contributors: Mavis Zhou","_____no_output_____"],["
","_____no_output_____"],["

Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the MIT License.

","_____no_output_____"]]],"string":"[\n [\n [\n \"\\n \\n\",\n \"_____no_output_____\"\n ],\n [\n \"

Classes and Objects in Python

\",\n \"_____no_output_____\"\n ],\n [\n \"

\\n Welcome! \\n Objects in programming are like objects in real life. Like life, there are different classes of objects. In this notebook, we will create two classes called Circle and Rectangle. By the end of this notebook, you will have a better idea about :\\n

    \\n
  • what a class is
  • what an attribute is
  • what a method is
\\n\\n Don’t worry if you don’t get it the first time, as much of the terminology is confusing. Don’t forget to do the practice tests in the notebook.\\n

\",\n \"_____no_output_____\"\n ],\n [\n \"
\\n \\n \\n \\n
\",\n \"_____no_output_____\"\n ],\n [\n \"

Table of Contents

\\n\\n\\n
\",\n \"_____no_output_____\"\n ],\n [\n \"

Introduction to Classes and Objects

\",\n \"_____no_output_____\"\n ],\n [\n \"

Creating a Class

\",\n \"_____no_output_____\"\n ],\n [\n \"The first part of creating a class is giving it a name: In this notebook, we will create two classes, Circle and Rectangle. We need to determine all the data that make up that class, and we call that an attribute. Think about this step as creating a blue print that we will use to create objects. In figure 1 we see two classes, circle and rectangle. Each has their attributes, they are variables. The class circle has the attribute radius and color, while the rectangle has the attribute height and width. Let’s use the visual examples of these shapes before we get to the code, as this will help you get accustomed to the vocabulary.\",\n \"_____no_output_____\"\n ],\n [\n \"\",\n \"_____no_output_____\"\n ],\n [\n \"Figure 1: Classes circle and rectangle, and each has their own attributes. The class circle has the attribute radius and colour, the rectangle has the attribute height and width.\\n\",\n \"_____no_output_____\"\n ],\n [\n \"

Instances of a Class: Objects and Attributes

\",\n \"_____no_output_____\"\n ],\n [\n \"An instance of an object is the realisation of a class, and in Figure 2 we see three instances of the class circle. We give each object a name: red circle, yellow circle and green circle. Each object has different attributes, so let's focus on the attribute of colour for each object.\",\n \"_____no_output_____\"\n ],\n [\n \"\",\n \"_____no_output_____\"\n ],\n [\n \"Figure 2: Three instances of the class circle or three objects of type circle.\",\n \"_____no_output_____\"\n ],\n [\n \" The colour attribute for the red circle is the colour red, for the green circle object the colour attribute is green, and for the yellow circle the colour attribute is yellow. \\n\",\n \"_____no_output_____\"\n ],\n [\n \"

Methods

\",\n \"_____no_output_____\"\n ],\n [\n \"Methods give you a way to change or interact with the object; they are functions that interact with objects. For example, let’s say we would like to increase the radius by a specified amount of a circle. We can create a method called **add_radius(r)** that increases the radius by **r**. This is shown in figure 3, where after applying the method to the \\\"orange circle object\\\", the radius of the object increases accordingly. The “dot” notation means to apply the method to the object, which is essentially applying a function to the information in the object.\",\n \"_____no_output_____\"\n ],\n [\n \" \",\n \"_____no_output_____\"\n ],\n [\n \"Figure 3: Applying the method “add_radius” to the object orange circle object.\",\n \"_____no_output_____\"\n ],\n [\n \"
\",\n \"_____no_output_____\"\n ],\n [\n \"

Creating a Class

\",\n \"_____no_output_____\"\n ],\n [\n \"Now we are going to create a class circle, but first, we are going to import a library to draw the objects: \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Import the library\\n\\nimport matplotlib.pyplot as plt\\n%matplotlib inline \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" The first step in creating your own class is to use the class keyword, then the name of the class as shown in Figure 4. In this course the class parent will always be object: \",\n \"_____no_output_____\"\n ],\n [\n \"\",\n \"_____no_output_____\"\n ],\n [\n \"Figure 4: Three instances of the class circle or three objects of type circle.\",\n \"_____no_output_____\"\n ],\n [\n \"The next step is a special method called a constructor &#95;&#95;init&#95;&#95;, which is used to initialize the object. The input are data attributes. The term self contains all the attributes in the set. For example the self.color gives the value of the attribute color and self.radius will give you the radius of the object. We also have the method add_radius() with the parameter r, the method adds the value of r to the attribute radius. To access the radius we use the syntax self.radius. The labeled syntax is summarized in Figure 5:\",\n \"_____no_output_____\"\n ],\n [\n \"\",\n \"_____no_output_____\"\n ],\n [\n \"Figure 5: Labeled syntax of the object circle.\",\n \"_____no_output_____\"\n ],\n [\n \"The actual object is shown below. We include the method drawCircle to display the image of a circle. We set the default radius to 3 and the default colour to blue:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create a class Circle\\n\\nclass Circle(object):\\n \\n # Constructor\\n def __init__(self, radius=3, color='blue'):\\n self.radius = radius\\n self.color = color \\n \\n # Method\\n def add_radius(self, r):\\n self.radius = self.radius + r\\n return(self.radius)\\n \\n # Method\\n def drawCircle(self):\\n plt.gca().add_patch(plt.Circle((0, 0), radius=self.radius, fc=self.color))\\n plt.axis('scaled')\\n plt.show() \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"
\",\n \"_____no_output_____\"\n ],\n [\n \"

Creating an instance of a class Circle

\",\n \"_____no_output_____\"\n ],\n [\n \"Let’s create the object RedCircle of type Circle to do the following:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create an object RedCircle\\n\\nRedCircle = Circle(10, 'red')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We can use the dir command to get a list of the object's methods. Many of them are default Python methods.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Find out the methods can be used on the object RedCircle\\n\\ndir(RedCircle)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We can look at the data attributes of the object: \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Print the object attribute radius\\n\\nRedCircle.radius\",\n \"_____no_output_____\"\n ],\n [\n \"# Print the object attribute color\\n\\nRedCircle.color\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" We can change the object's data attributes: \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Set the object attribute radius\\n\\nRedCircle.radius = 1\\nRedCircle.radius\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" We can draw the object by using the method drawCircle():\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Call the method drawCircle\\n\\nRedCircle.drawCircle()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We can increase the radius of the circle by applying the method add_radius(). Let increases the radius by 2 and then by 5: \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Use method to change the object attribute radius\\n\\nprint('Radius of object:',RedCircle.radius)\\nRedCircle.add_radius(2)\\nprint('Radius of object of after applying the method add_radius(2):',RedCircle.radius)\\nRedCircle.add_radius(5)\\nprint('Radius of object of after applying the method add_radius(5):',RedCircle.radius)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" Let’s create a blue circle. As the default colour is blue, all we have to do is specify what the radius is:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create a blue circle with a given radius\\n\\nBlueCircle = Circle(radius=100)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" As before we can access the attributes of the instance of the class by using the dot notation:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Print the object attribute radius\\n\\nBlueCircle.radius\",\n \"_____no_output_____\"\n ],\n [\n \"# Print the object attribute color\\n\\nBlueCircle.color\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" We can draw the object by using the method drawCircle():\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Call the method drawCircle\\n\\nBlueCircle.drawCircle()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Compare the x and y axis of the figure to the figure for RedCircle; they are different.\",\n \"_____no_output_____\"\n ],\n [\n \"
\",\n \"_____no_output_____\"\n ],\n [\n \"

The Rectangle Class

\",\n \"_____no_output_____\"\n ],\n [\n \"Let's create a class rectangle with the attributes of height, width and color. We will only add the method to draw the rectangle object:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create a new Rectangle class for creating a rectangle object\\n\\nclass Rectangle(object):\\n \\n # Constructor\\n def __init__(self, width=2, height=3, color='r'):\\n self.height = height \\n self.width = width\\n self.color = color\\n \\n # Method\\n def drawRectangle(self):\\n plt.gca().add_patch(plt.Rectangle((0, 0), self.width, self.height ,fc=self.color))\\n plt.axis('scaled')\\n plt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let’s create the object SkinnyBlueRectangle of type Rectangle. Its width will be 2 and height will be 3, and the color will be blue:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create a new object rectangle\\n\\nSkinnyBlueRectangle = Rectangle(2, 10, 'blue')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" As before we can access the attributes of the instance of the class by using the dot notation:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Print the object attribute height\\n\\nSkinnyBlueRectangle.height \",\n \"_____no_output_____\"\n ],\n [\n \"# Print the object attribute width\\n\\nSkinnyBlueRectangle.width\",\n \"_____no_output_____\"\n ],\n [\n \"# Print the object attribute color\\n\\nSkinnyBlueRectangle.color\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" We can draw the object:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Use the drawRectangle method to draw the shape\\n\\nSkinnyBlueRectangle.drawRectangle()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let’s create the object FatYellowRectangle of type Rectangle :\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create a new object rectangle\\n\\nFatYellowRectangle = Rectangle(20, 5, 'yellow')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" We can access the attributes of the instance of the class by using the dot notation:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Print the object attribute height\\n\\nFatYellowRectangle.height \",\n \"_____no_output_____\"\n ],\n [\n \"# Print the object attribute width\\n\\nFatYellowRectangle.width\",\n \"_____no_output_____\"\n ],\n [\n \"# Print the object attribute color\\n\\nFatYellowRectangle.color\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" We can draw the object:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Use the drawRectangle method to draw the shape\\n\\nFatYellowRectangle.drawRectangle()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"
\\n

The last exercise!

\\n

Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work, and the best way to share and showcase your work is to publish it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists but also creating something you can show off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow this article to learn how to share your work.\n
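If you have never pushed a notebook to GitHub before, the cell below sketches the usual steps from inside a notebook; the file name, user name and repository URL are placeholders to replace with your own, and the article mentioned above covers the details.

```
# Rough sketch: publish this notebook to GitHub from a notebook cell.
# The file name, user name and repository URL are placeholders.
!git init
!git add your-notebook-name.ipynb
!git commit -m "Add my first Python classes lab"
# Create an empty repository on github.com first, then point 'origin' at it:
!git remote add origin https://github.com/your-username/your-repository.git
!git push -u origin main   # use 'master' if that is your default branch name
```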


\",\n \"_____no_output_____\"\n ],\n [\n \"
\\n

Get IBM Watson Studio free of charge!

\\n

\\n
\",\n \"_____no_output_____\"\n ],\n [\n \"

About the Authors:

\\n

Joseph Santarcangelo is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.

\",\n \"_____no_output_____\"\n ],\n [\n \"Other contributors: Mavis Zhou\",\n \"_____no_output_____\"\n ],\n [\n \"
\",\n \"_____no_output_____\"\n ],\n [\n \"

Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the MIT License.

\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown"],["code"],["markdown","markdown","markdown","markdown","markdown","markdown","markdown"],["code"],["markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown","markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown","markdown","markdown","markdown","markdown","markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n 
]\n]"}}},{"rowIdx":1459048,"cells":{"hexsha":{"kind":"string","value":"e7eee6e708e445378b01380be9d80e607ad25e9d"},"size":{"kind":"number","value":122545,"string":"122,545"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebooks/01-exploracao-dados.ipynb"},"max_stars_repo_name":{"kind":"string","value":"andersonnrc/projeto-bootcamp-carrefour-analise-dados"},"max_stars_repo_head_hexsha":{"kind":"string","value":"51d215b829e1ffc683d9baa686ca98e497bc1b92"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"notebooks/01-exploracao-dados.ipynb"},"max_issues_repo_name":{"kind":"string","value":"andersonnrc/projeto-bootcamp-carrefour-analise-dados"},"max_issues_repo_head_hexsha":{"kind":"string","value":"51d215b829e1ffc683d9baa686ca98e497bc1b92"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"notebooks/01-exploracao-dados.ipynb"},"max_forks_repo_name":{"kind":"string","value":"andersonnrc/projeto-bootcamp-carrefour-analise-dados"},"max_forks_repo_head_hexsha":{"kind":"string","value":"51d215b829e1ffc683d9baa686ca98e497bc1b92"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":102.2913188648,"string":"102.291319"},"max_line_length":{"kind":"number","value":31048,"string":"31,048"},"alphanum_fraction":{"kind":"number","value":0.7852625566,"string":"0.785263"},"cells":{"kind":"list like","value":[[["# Despesas - Autorizações de Pagamento do Governo do Estado da Paraíba\n## De Janeiro/2021 a Junho/2021","_____no_output_____"]],[["# Instalação pacotes\n\n!pip install pandas\n!pip install PyMySQL\n!pip install SQLAlchemy","_____no_output_____"],["import pandas as pd","_____no_output_____"],["# Carregar CSVs em data frame do pandas\n\ndf1 = pd.read_csv('../data/pagamento_exercicio_2021_mes_1.csv', encoding='ISO-8859-1',sep=';')\ndf2 = pd.read_csv('../data/pagamento_exercicio_2021_mes_2.csv', encoding='ISO-8859-1',sep=';')\ndf3 = pd.read_csv('../data/pagamento_exercicio_2021_mes_3.csv', encoding='ISO-8859-1',sep=';')\ndf4 = pd.read_csv('../data/pagamento_exercicio_2021_mes_4.csv', encoding='ISO-8859-1',sep=';')\ndf5 = pd.read_csv('../data/pagamento_exercicio_2021_mes_5.csv', encoding='ISO-8859-1',sep=';')\ndf6 = pd.read_csv('../data/pagamento_exercicio_2021_mes_6.csv', encoding='ISO-8859-1',sep=';')","_____no_output_____"],["# Concatenar todos os dataframes\n\ndf = pd.concat([df1, df2, df3, df4, df5, df6])","_____no_output_____"]],[["## Realização de análises e transformações","_____no_output_____"]],[["# Exibir as colunas\n\ndf.columns","_____no_output_____"],["# Exibir quantidade de linhas e colunas\n\ndf.shape","_____no_output_____"],["# Exibir tipos das colunas\n\ndf.dtypes","_____no_output_____"],["# Converter coluna (DATA_PAGAMENTO) em datetime\n# Converter colunas (EXERCICIO, CODIGO_UNIDADE_GESTORA, 
NUMERO_EMPENHO, NUMERO_AUTORIZACAO_PAGAMENTO) em object\n\ndf[\"DATA_PAGAMENTO\"] = pd.to_datetime(df[\"DATA_PAGAMENTO\"])\ndf[\"EXERCICIO\"] = df[\"EXERCICIO\"].astype(\"object\")\ndf[\"CODIGO_UNIDADE_GESTORA\"] = df[\"CODIGO_UNIDADE_GESTORA\"].astype(\"object\")\ndf[\"NUMERO_EMPENHO\"] = df[\"CODIGO_UNIDADE_GESTORA\"].astype(\"object\")\ndf[\"NUMERO_AUTORIZACAO_PAGAMENTO\"] = df[\"NUMERO_AUTORIZACAO_PAGAMENTO\"].astype(\"object\")","_____no_output_____"],["# Exibir tipos das colunas\n\ndf.dtypes","_____no_output_____"],["# Consultar linhas com valores faltantes\n\ndf.isnull().sum()","_____no_output_____"],["# Exibir amostra\n\ndf.sample(10)","_____no_output_____"],["# Criar nova coluna que vai receber o mês de pagamento\n\ndf[\"MES_PAGAMENTO\"] = df[\"DATA_PAGAMENTO\"].dt.month","_____no_output_____"],["# Exibir amostra\n\ndf.sample(10)","_____no_output_____"],["# Conveter saída para coluna (VALOR_PAGAMENTO) com o tipo float\n\npd.options.display.float_format = 'R${:,.2f}'.format","_____no_output_____"],["# Retornar total pago agrupado por mês e por tipo de despesa\n\n# df.groupby([df[\"MES_PAGAMENTO\"], \"TIPO_DESPESA\"])[\"VALOR_PAGAMENTO\"].sum().reset_index()\n\n# Outra forma\ndf.groupby(['MES_PAGAMENTO', \"TIPO_DESPESA\"]).agg({\"VALOR_PAGAMENTO\":\"sum\"}).reset_index()","_____no_output_____"],["# Retornar maior valor pago a um credor agrupado por mês\n\n# df.groupby(df[\"MES_PAGAMENTO\"])[\"VALOR_PAGAMENTO\"].max()\n\ndf.groupby([\"MES_PAGAMENTO\"]).agg({\"VALOR_PAGAMENTO\":\"max\"}).reset_index()","_____no_output_____"],["# Salvar dataframe em um arquivo CSV\n\ndf.to_csv('../data/pagamento_exercicio_2021_jan_a_jun_governo_pb.csv', index=False)","_____no_output_____"],["# Salvar dataframe no banco de dados\n\nfrom sqlalchemy import create_engine\n\ncon = create_engine(\"mysql+pymysql://root:mysql@localhost:3307/db_governo_pb\",\n encoding=\"utf-8\")\ndf.to_sql('tb_pagamento_exercicio_2021', con, index = False, if_exists = 'replace', method = 'multi', chunksize=10000)","_____no_output_____"]],[["## Gráficos para análise exploratória e/ou tomada de decisão","_____no_output_____"]],[["import matplotlib.pyplot as plt\nplt.style.use(\"seaborn\")","_____no_output_____"],["# Gráfico com o total pago aos credores por mês (Janeiro a Junho)\n\ndf.groupby(df['MES_PAGAMENTO'])['VALOR_PAGAMENTO'].sum().plot.bar(title = 'Total Pago por Mês', color = 'blue')\nplt.xlabel('MÊS')\nplt.ylabel('RECEITA');","_____no_output_____"],["# Gráfico com o valor máximo pago a um credor por mês (Janeiro a Junho)\n\ndf.groupby([\"MES_PAGAMENTO\"]).agg({\"VALOR_PAGAMENTO\":\"max\"}).plot.bar(title = 'Maior valor pago a um credor po mês', color = 'green')\nplt.xlabel('MÊS')\nplt.ylabel('VALOR');","_____no_output_____"],["# Gráfico de linha exibindo a soma dos pagamentos a credores no decorrer dos meses\n\ndf.groupby([\"MES_PAGAMENTO\"]).agg({\"VALOR_PAGAMENTO\":\"sum\"}).plot(title = 'Total de pagamentos por mês aos credores')\nplt.xlabel('MÊS')\nplt.ylabel('TOTAL PAGO')\nplt.legend();","_____no_output_____"],["# Gráfico com o valor pago a credores agrupados por tipo de despesa\n\ndf.groupby([\"TIPO_DESPESA\"]).agg({\"VALOR_PAGAMENTO\":\"sum\"}).plot.bar(title = 'Soma dos valores pagos por tipo de despesa', color = 'gray')\nplt.xlabel('TIPO DE DESPESA')\nplt.ylabel('VALOR');","_____no_output_____"]]],"string":"[\n [\n [\n \"# Despesas - Autorizações de Pagamento do Governo do Estado da Paraíba\\n## De Janeiro/2021 a Junho/2021\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Instalação pacotes\\n\\n!pip install 
pandas\\n!pip install PyMySQL\\n!pip install SQLAlchemy\",\n \"_____no_output_____\"\n ],\n [\n \"import pandas as pd\",\n \"_____no_output_____\"\n ],\n [\n \"# Carregar CSVs em data frame do pandas\\n\\ndf1 = pd.read_csv('../data/pagamento_exercicio_2021_mes_1.csv', encoding='ISO-8859-1',sep=';')\\ndf2 = pd.read_csv('../data/pagamento_exercicio_2021_mes_2.csv', encoding='ISO-8859-1',sep=';')\\ndf3 = pd.read_csv('../data/pagamento_exercicio_2021_mes_3.csv', encoding='ISO-8859-1',sep=';')\\ndf4 = pd.read_csv('../data/pagamento_exercicio_2021_mes_4.csv', encoding='ISO-8859-1',sep=';')\\ndf5 = pd.read_csv('../data/pagamento_exercicio_2021_mes_5.csv', encoding='ISO-8859-1',sep=';')\\ndf6 = pd.read_csv('../data/pagamento_exercicio_2021_mes_6.csv', encoding='ISO-8859-1',sep=';')\",\n \"_____no_output_____\"\n ],\n [\n \"# Concatenar todos os dataframes\\n\\ndf = pd.concat([df1, df2, df3, df4, df5, df6])\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Realização de análises e transformações\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Exibir as colunas\\n\\ndf.columns\",\n \"_____no_output_____\"\n ],\n [\n \"# Exibir quantidade de linhas e colunas\\n\\ndf.shape\",\n \"_____no_output_____\"\n ],\n [\n \"# Exibir tipos das colunas\\n\\ndf.dtypes\",\n \"_____no_output_____\"\n ],\n [\n \"# Converter coluna (DATA_PAGAMENTO) em datetime\\n# Converter colunas (EXERCICIO, CODIGO_UNIDADE_GESTORA, NUMERO_EMPENHO, NUMERO_AUTORIZACAO_PAGAMENTO) em object\\n\\ndf[\\\"DATA_PAGAMENTO\\\"] = pd.to_datetime(df[\\\"DATA_PAGAMENTO\\\"])\\ndf[\\\"EXERCICIO\\\"] = df[\\\"EXERCICIO\\\"].astype(\\\"object\\\")\\ndf[\\\"CODIGO_UNIDADE_GESTORA\\\"] = df[\\\"CODIGO_UNIDADE_GESTORA\\\"].astype(\\\"object\\\")\\ndf[\\\"NUMERO_EMPENHO\\\"] = df[\\\"CODIGO_UNIDADE_GESTORA\\\"].astype(\\\"object\\\")\\ndf[\\\"NUMERO_AUTORIZACAO_PAGAMENTO\\\"] = df[\\\"NUMERO_AUTORIZACAO_PAGAMENTO\\\"].astype(\\\"object\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"# Exibir tipos das colunas\\n\\ndf.dtypes\",\n \"_____no_output_____\"\n ],\n [\n \"# Consultar linhas com valores faltantes\\n\\ndf.isnull().sum()\",\n \"_____no_output_____\"\n ],\n [\n \"# Exibir amostra\\n\\ndf.sample(10)\",\n \"_____no_output_____\"\n ],\n [\n \"# Criar nova coluna que vai receber o mês de pagamento\\n\\ndf[\\\"MES_PAGAMENTO\\\"] = df[\\\"DATA_PAGAMENTO\\\"].dt.month\",\n \"_____no_output_____\"\n ],\n [\n \"# Exibir amostra\\n\\ndf.sample(10)\",\n \"_____no_output_____\"\n ],\n [\n \"# Conveter saída para coluna (VALOR_PAGAMENTO) com o tipo float\\n\\npd.options.display.float_format = 'R${:,.2f}'.format\",\n \"_____no_output_____\"\n ],\n [\n \"# Retornar total pago agrupado por mês e por tipo de despesa\\n\\n# df.groupby([df[\\\"MES_PAGAMENTO\\\"], \\\"TIPO_DESPESA\\\"])[\\\"VALOR_PAGAMENTO\\\"].sum().reset_index()\\n\\n# Outra forma\\ndf.groupby(['MES_PAGAMENTO', \\\"TIPO_DESPESA\\\"]).agg({\\\"VALOR_PAGAMENTO\\\":\\\"sum\\\"}).reset_index()\",\n \"_____no_output_____\"\n ],\n [\n \"# Retornar maior valor pago a um credor agrupado por mês\\n\\n# df.groupby(df[\\\"MES_PAGAMENTO\\\"])[\\\"VALOR_PAGAMENTO\\\"].max()\\n\\ndf.groupby([\\\"MES_PAGAMENTO\\\"]).agg({\\\"VALOR_PAGAMENTO\\\":\\\"max\\\"}).reset_index()\",\n \"_____no_output_____\"\n ],\n [\n \"# Salvar dataframe em um arquivo CSV\\n\\ndf.to_csv('../data/pagamento_exercicio_2021_jan_a_jun_governo_pb.csv', index=False)\",\n \"_____no_output_____\"\n ],\n [\n \"# Salvar dataframe no banco de dados\\n\\nfrom sqlalchemy import create_engine\\n\\ncon = 
create_engine(\\\"mysql+pymysql://root:mysql@localhost:3307/db_governo_pb\\\",\\n encoding=\\\"utf-8\\\")\\ndf.to_sql('tb_pagamento_exercicio_2021', con, index = False, if_exists = 'replace', method = 'multi', chunksize=10000)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Gráficos para análise exploratória e/ou tomada de decisão\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import matplotlib.pyplot as plt\\nplt.style.use(\\\"seaborn\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"# Gráfico com o total pago aos credores por mês (Janeiro a Junho)\\n\\ndf.groupby(df['MES_PAGAMENTO'])['VALOR_PAGAMENTO'].sum().plot.bar(title = 'Total Pago por Mês', color = 'blue')\\nplt.xlabel('MÊS')\\nplt.ylabel('RECEITA');\",\n \"_____no_output_____\"\n ],\n [\n \"# Gráfico com o valor máximo pago a um credor por mês (Janeiro a Junho)\\n\\ndf.groupby([\\\"MES_PAGAMENTO\\\"]).agg({\\\"VALOR_PAGAMENTO\\\":\\\"max\\\"}).plot.bar(title = 'Maior valor pago a um credor po mês', color = 'green')\\nplt.xlabel('MÊS')\\nplt.ylabel('VALOR');\",\n \"_____no_output_____\"\n ],\n [\n \"# Gráfico de linha exibindo a soma dos pagamentos a credores no decorrer dos meses\\n\\ndf.groupby([\\\"MES_PAGAMENTO\\\"]).agg({\\\"VALOR_PAGAMENTO\\\":\\\"sum\\\"}).plot(title = 'Total de pagamentos por mês aos credores')\\nplt.xlabel('MÊS')\\nplt.ylabel('TOTAL PAGO')\\nplt.legend();\",\n \"_____no_output_____\"\n ],\n [\n \"# Gráfico com o valor pago a credores agrupados por tipo de despesa\\n\\ndf.groupby([\\\"TIPO_DESPESA\\\"]).agg({\\\"VALOR_PAGAMENTO\\\":\\\"sum\\\"}).plot.bar(title = 'Soma dos valores pagos por tipo de despesa', color = 'gray')\\nplt.xlabel('TIPO DE DESPESA')\\nplt.ylabel('VALOR');\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459049,"cells":{"hexsha":{"kind":"string","value":"e7eee7355e1737763300cf1df828956273307e27"},"size":{"kind":"number","value":13659,"string":"13,659"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Chapter 2/8_Experimenting with different optimizers.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Anacoder1/Python_DeepLearning_Cookbook"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0a26b3948930333a357193c979b18269e1772651"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Chapter 2/8_Experimenting with different 
optimizers.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Anacoder1/Python_DeepLearning_Cookbook"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0a26b3948930333a357193c979b18269e1772651"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Chapter 2/8_Experimenting with different optimizers.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Anacoder1/Python_DeepLearning_Cookbook"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0a26b3948930333a357193c979b18269e1772651"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-11-29T02:23:59.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-11-30T06:49:29.000Z"},"avg_line_length":{"kind":"number","value":31.2562929062,"string":"31.256293"},"max_line_length":{"kind":"number","value":238,"string":"238"},"alphanum_fraction":{"kind":"number","value":0.3902189033,"string":"0.390219"},"cells":{"kind":"list like","value":[[["**Load the libraries:**","_____no_output_____"]],[["import numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.optimizers import SGD, Adadelta, Adam, RMSprop, Adagrad, Nadam, Adamax\n\nSEED = 2017","Using TensorFlow backend.\n"]],[["**Import the dataset and extract the target variable:**","_____no_output_____"]],[["data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv',\n sep = ';')\n\ny = data['quality']\nX = data.drop(['quality'], axis = 1)","_____no_output_____"]],[["**Split the dataset for training, validation and testing:**","_____no_output_____"]],[["X_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size = 0.2,\n random_state = SEED)\n\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train,\n test_size = 0.2,\n random_state = SEED)","_____no_output_____"]],[["**Define a function that creates the model:**","_____no_output_____"]],[["def create_model(opt):\n model = Sequential()\n model.add(Dense(100, input_dim = X_train.shape[1],\n activation = 'relu'))\n model.add(Dense(50, activation = 'relu'))\n model.add(Dense(25, activation = 'relu'))\n model.add(Dense(10, activation = 'relu'))\n model.add(Dense(1, activation = 'linear'))\n return model","_____no_output_____"]],[["**Create a function that defines callbacks we will be using during training:**","_____no_output_____"]],[["def create_callbacks(opt):\n callbacks = [\n EarlyStopping(monitor = 'val_acc', patience = 200,\n verbose = 2),\n ModelCheckpoint('optimizers_best_' + opt + '.h5',\n monitor = 'val_acc',\n save_best_only = True,\n verbose = 0)\n ]\n return callbacks","_____no_output_____"]],[["**Create a dict of the optimizers we want to try:**","_____no_output_____"]],[["opts = dict({\n 'sgd': SGD(),\n 'sgd-0001': SGD(lr = 0.0001, decay = 0.00001),\n 'adam': Adam(),\n 'adadelta': Adadelta(),\n 'rmsprop': RMSprop(),\n 'rmsprop-0001': RMSprop(lr = 0.0001),\n 'nadam': Nadam(),\n 'adamax': Adamax()\n})","WARNING:tensorflow:From 
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n"]],[["**Train our networks and store results:**","_____no_output_____"]],[["batch_size = 128\nn_epochs = 1000\n\nresults = []\n\n# Loop through the optimizers\nfor opt in opts:\n model = create_model(opt)\n callbacks = create_callbacks(opt)\n model.compile(loss = 'mse', \n optimizer = opts[opt],\n metrics = ['accuracy'])\n hist = model.fit(X_train.values, y_train, \n batch_size = batch_size,\n epochs = n_epochs,\n validation_data = (X_val.values, y_val),\n verbose = 0,\n callbacks = callbacks)\n \n best_epoch = np.argmax(hist.history['val_acc'])\n best_acc = hist.history['val_acc'][best_epoch]\n best_model = create_model(opt)\n \n # Load the model weights with the highest validation accuracy\n best_model.load_weights('optimizers_best_' + opt + '.h5')\n best_model.compile(loss = 'mse',\n optimizer = opts[opt],\n metrics = ['accuracy'])\n score = best_model.evaluate(X_test.values, y_test, verbose = 0)\n results.append([opt, best_epoch, best_acc, score[1]])","Epoch 00201: early stopping\nEpoch 00414: early stopping\nEpoch 00625: early stopping\nEpoch 00373: early stopping\nEpoch 00413: early stopping\nEpoch 00230: early stopping\nEpoch 00269: early stopping\nEpoch 00424: early stopping\n"]],[["**Compare the results:**","_____no_output_____"]],[["res = pd.DataFrame(results)\n\nres.columns = ['optimizer', 'epochs', 'val_accuracy', 'test_accuracy']\nres","_____no_output_____"]]],"string":"[\n [\n [\n \"**Load the libraries:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\nimport pandas as pd\\n\\nfrom sklearn.model_selection import train_test_split\\n\\nfrom keras.models import Sequential\\nfrom keras.layers import Dense, Dropout\\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\\nfrom keras.optimizers import SGD, Adadelta, Adam, RMSprop, Adagrad, Nadam, Adamax\\n\\nSEED = 2017\",\n \"Using TensorFlow backend.\\n\"\n ]\n ],\n [\n [\n \"**Import the dataset and extract the target variable:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv',\\n sep = ';')\\n\\ny = data['quality']\\nX = data.drop(['quality'], axis = 1)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Split the dataset for training, validation and testing:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"X_train, X_test, y_train, y_test = train_test_split(X, y, \\n test_size = 0.2,\\n random_state = SEED)\\n\\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train,\\n test_size = 0.2,\\n random_state = SEED)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Define a function that creates the model:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def create_model(opt):\\n model = Sequential()\\n model.add(Dense(100, input_dim = X_train.shape[1],\\n activation = 'relu'))\\n model.add(Dense(50, activation = 'relu'))\\n model.add(Dense(25, activation = 'relu'))\\n model.add(Dense(10, activation = 'relu'))\\n model.add(Dense(1, activation = 'linear'))\\n return model\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Create a function that defines callbacks we will be using during training:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def create_callbacks(opt):\\n callbacks = [\\n 
EarlyStopping(monitor = 'val_acc', patience = 200,\\n verbose = 2),\\n ModelCheckpoint('optimizers_best_' + opt + '.h5',\\n monitor = 'val_acc',\\n save_best_only = True,\\n verbose = 0)\\n ]\\n return callbacks\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Create a dict of the optimizers we want to try:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"opts = dict({\\n 'sgd': SGD(),\\n 'sgd-0001': SGD(lr = 0.0001, decay = 0.00001),\\n 'adam': Adam(),\\n 'adadelta': Adadelta(),\\n 'rmsprop': RMSprop(),\\n 'rmsprop-0001': RMSprop(lr = 0.0001),\\n 'nadam': Nadam(),\\n 'adamax': Adamax()\\n})\",\n \"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\\nInstructions for updating:\\nColocations handled automatically by placer.\\n\"\n ]\n ],\n [\n [\n \"**Train our networks and store results:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"batch_size = 128\\nn_epochs = 1000\\n\\nresults = []\\n\\n# Loop through the optimizers\\nfor opt in opts:\\n model = create_model(opt)\\n callbacks = create_callbacks(opt)\\n model.compile(loss = 'mse', \\n optimizer = opts[opt],\\n metrics = ['accuracy'])\\n hist = model.fit(X_train.values, y_train, \\n batch_size = batch_size,\\n epochs = n_epochs,\\n validation_data = (X_val.values, y_val),\\n verbose = 0,\\n callbacks = callbacks)\\n \\n best_epoch = np.argmax(hist.history['val_acc'])\\n best_acc = hist.history['val_acc'][best_epoch]\\n best_model = create_model(opt)\\n \\n # Load the model weights with the highest validation accuracy\\n best_model.load_weights('optimizers_best_' + opt + '.h5')\\n best_model.compile(loss = 'mse',\\n optimizer = opts[opt],\\n metrics = ['accuracy'])\\n score = best_model.evaluate(X_test.values, y_test, verbose = 0)\\n results.append([opt, best_epoch, best_acc, score[1]])\",\n \"Epoch 00201: early stopping\\nEpoch 00414: early stopping\\nEpoch 00625: early stopping\\nEpoch 00373: early stopping\\nEpoch 00413: early stopping\\nEpoch 00230: early stopping\\nEpoch 00269: early stopping\\nEpoch 00424: early stopping\\n\"\n ]\n ],\n [\n [\n \"**Compare the results:**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"res = pd.DataFrame(results)\\n\\nres.columns = ['optimizer', 'epochs', 'val_accuracy', 'test_accuracy']\\nres\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n 
]\n]"}}},{"rowIdx":1459050,"cells":{"hexsha":{"kind":"string","value":"e7eef35004db6c11c7ab9cce5b2df42781d92bc5"},"size":{"kind":"number","value":182867,"string":"182,867"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebooks/2-Jupyter/jupyter-advanced-Copy1.ipynb"},"max_stars_repo_name":{"kind":"string","value":"burkesquires/jupyter_training_2020"},"max_stars_repo_head_hexsha":{"kind":"string","value":"63505d9b8133f80330fe92a74b7641066dba420c"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-11-18T19:29:20.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-09-09T13:52:29.000Z"},"max_issues_repo_path":{"kind":"string","value":"notebooks/2-Jupyter/jupyter-advanced-Copy1.ipynb"},"max_issues_repo_name":{"kind":"string","value":"burkesquires/jupyter_training_2020"},"max_issues_repo_head_hexsha":{"kind":"string","value":"63505d9b8133f80330fe92a74b7641066dba420c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"notebooks/2-Jupyter/jupyter-advanced-Copy1.ipynb"},"max_forks_repo_name":{"kind":"string","value":"burkesquires/jupyter_training_2020"},"max_forks_repo_head_hexsha":{"kind":"string","value":"63505d9b8133f80330fe92a74b7641066dba420c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-11-18T19:39:31.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-11-17T07:49:09.000Z"},"avg_line_length":{"kind":"number","value":43.8214713635,"string":"43.821471"},"max_line_length":{"kind":"number","value":4668,"string":"4,668"},"alphanum_fraction":{"kind":"number","value":0.5341805793,"string":"0.534181"},"cells":{"kind":"list like","value":[[["# Jupyter (IPython) Advanced Features\n---","_____no_output_____"],["Outline\n- Keyboard shortcuts\n- Magic\n- Accessing the underlying operating system\n- Using different languages inside single notebook\n- File magic\n- Using Jupyter more efficiently\n- Profiling\n- Output\n- Automation\n- Extensions\n- 'Big Data' Analysis\n \n\nSources: [IPython Tutorial](https://github.com/ipython/ipython-in-depth/blob/pycon-2019/1%20-%20Beyond%20Plain%20Python.ipynb), [Dataquest](https://www.dataquest.io/blog/advanced-jupyter-notebooks-tutorial/), and [Dataquest](https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/), [Alex Rogozhnikov Blog](http://arogozhnikov.github.io/2016/09/10/jupyter-features.html) [Toward Data Science](https://towardsdatascience.com/how-to-effortlessly-optimize-jupyter-notebooks-e864162a06ee)","_____no_output_____"],["---","_____no_output_____"],["## Keyboard Shortcuts\n\n\nKeyboard Shortcuts\n\nAs in the classic Notebook, you can navigate the user interface through keyboard shortcuts. 
You can find and customize the current list of keyboard shortcuts by selecting the Advanced Settings Editor item in the Settings menu, then selecting Keyboard Shortcuts in the Settings tab.\n\n### Shortcut Keys for Jupyter lab\n\nWhile working with any tools, it helps if you know shortcut key to perform most frequent tasks. It increases your productivity and can be very comfortable while working. I have listed down some of the shortcuts which I use frequently while working on Jupyter Lab. Hopefully, it will be useful for others too. Also, you can check full list of shortcut by accessing the __commands tab__ in the Jupyter lab. You will find it below the Files on the left hand side.\n\n1. **ESC** takes users into command mode view while **ENTER** takes users into cell mode view.\n2. **A** inserts a cell above the currently selected cell. Before using this, make sure that you are in command mode (by pressing ESC).\n3. **B** inserts a cell below the currently selected cell. Before using this make sure that you are in command mode (by pressing ESC).\n4. **D + D** = Pressing D two times in a quick succession in command mode deletes the currently selected cell. \n5. Jupyter Lab gives you an option to change your cell into Code cell, Markdown cell or Raw Cell. You can use **M** to change current cell to a markdown cell, **Y** to change it to a code cell and  **R** to change it to a raw cell.\n6. ****CTRL + B**** = Jupyter lab has two columns design. One column is for launcher or code blocks and another column is for file view etc. To increase workspace while writing code, we can close it.  **CTRL + B** is the shortcut for toggling the file view column in the Jupyter lab.\n7. **SHIFT + M** = It merges multiple selected cells into one cell. \n8. **CTRL + SHIFT + –** = It splits the current cell into two cells from where your cursor is.\n9. **SHIFT+J** or **SHIFT + DOWN** = It selects the next cell in a downward direction.  It will help in making multiple selections of cells.\n10. **SHIFT + K** or **SHIFT + UP** = It selects the next cell in an upwards direction. It will help in making multiple selections of cells.\n11. **CTRL +** / = It helps you in either commenting or uncommenting any line in the Jupyter lab. For this to work, you don’t even need to select the whole line. It will comment or uncomment line where your cursor is. If you want to do it for more that one line then you will need to first select all the line and then use this shortcut.\n\nA PDF!!!\n- https://blog.ja-ke.tech/2019/01/20/jupyterlab-shortcuts.html\n- https://github.com/Jakeler/jupyter-shortcuts","_____no_output_____"],["## Magics\n\n---","_____no_output_____"],["Magics are turning simple python into *magical python*. Magics are the key to power of ipython.\n\nMagic functions are prefixed by % or %%, and typically take their arguments without parentheses, quotes or even commas for convenience. Line magics take a single % and cell magics are prefixed with two %%.","_____no_output_____"],["#### What is Magic??? Information about IPython's 'magic' % functions.","_____no_output_____"]],[["%magic","_____no_output_____"]],[["#### List available python magics","_____no_output_____"]],[["%lsmagic","_____no_output_____"]],[["#### %env\nYou can manage environment variables of your notebook without restarting the jupyter server process. 
Some libraries (like theano) use environment variables to control behavior, %env is the most convenient way.","_____no_output_____"]],[["# %env - without arguments lists environmental variables\n%env OMP_NUM_THREADS=4","_____no_output_____"]],[["# Accessing the underlying operating system\n\n---","_____no_output_____"],["## Executing shell commands\n\nYou can call any shell command. This in particular useful to manage your virtual environment.","_____no_output_____"]],[["!pip install numpy","Requirement already satisfied: numpy in /Users/squiresrb/opt/anaconda3/lib/python3.8/site-packages (1.19.2)\n"],["!pip list | grep Theano","_____no_output_____"]],[["## Adding packages can also be done using...","_____no_output_____"],["%conda install numpy","_____no_output_____"],["%pip install numpy","_____no_output_____"],["will attempt to install packages in the current environment.","_____no_output_____"]],[["!pwd","/Users/squiresrb/Documents/GitHub/jupyter_training_2020/notebooks/2-Jupyter\n"],["%pwd","_____no_output_____"],["pwd","_____no_output_____"],["files = !ls .\nprint(\"files in notebooks directory:\")\nprint(files)","files in notebooks directory:\n['2-1-Jupyter-ecosystem.ipynb', '2-1-jupyter-ecosystem.slides.html', '2-10-jupyter-code-script-of-scripts.ipynb', '2-11-Advanced-jupyter.ipynb', '2-2-jupyter-get-in-and-out.ipynb', '2-3-jupyter-notebook-basics.ipynb', '2-4-jupyter-markdown.ipynb', '2-5-jupyter-code-python.ipynb', '2-6-jupyter-code-r.ipynb', '2-7-jupyter-command-line.ipynb', '2-8-jupyter-magics.ipynb', '2-9-jupyter-sharing', '2-Jupyter-help.ipynb', 'Advanced_jupyter.ipynb', 'big-data-analysis-jupyter.ipynb', 'foo.py', 'images', 'jupyter-advanced.ipynb', 'matplotlib-anatomy.ipynb', 'pythoncode.py']\n"],["!echo $files","[2-1-Jupyter-ecosystem.ipynb, 2-1-jupyter-ecosystem.slides.html, 2-10-jupyter-code-script-of-scripts.ipynb, 2-11-Advanced-jupyter.ipynb, 2-2-jupyter-get-in-and-out.ipynb, 2-3-jupyter-notebook-basics.ipynb, 2-4-jupyter-markdown.ipynb, 2-5-jupyter-code-python.ipynb, 2-6-jupyter-code-r.ipynb, 2-7-jupyter-command-line.ipynb, 2-8-jupyter-magics.ipynb, 2-9-jupyter-sharing, 2-Jupyter-help.ipynb, Advanced_jupyter.ipynb, big-data-analysis-jupyter.ipynb, foo.py, images, jupyter-advanced.ipynb, matplotlib-anatomy.ipynb, pythoncode.py]\n"],["!echo {files[0].upper()}","2-1-JUPYTER-ECOSYSTEM.IPYNB\n"]],[["Note that all this is available even in multiline blocks:","_____no_output_____"]],[["import os\nfor i,f in enumerate(files):\n if f.endswith('ipynb'):\n !echo {\"%02d\" % i} - \"{os.path.splitext(f)[0]}\"\n else:\n print('--')","00 - 2-1-Jupyter-ecosystem\n--\n02 - 2-10-jupyter-code-script-of-scripts\n03 - 2-11-Advanced-jupyter\n04 - 2-2-jupyter-get-in-and-out\n05 - 2-3-jupyter-notebook-basics\n06 - 2-4-jupyter-markdown\n07 - 2-5-jupyter-code-python\n08 - 2-6-jupyter-code-r\n09 - 2-7-jupyter-command-line\n10 - 2-8-jupyter-magics\n--\n12 - 2-Jupyter-help\n13 - Advanced_jupyter\n14 - big-data-analysis-jupyter\n--\n--\n17 - jupyter-advanced\n18 - matplotlib-anatomy\n--\n"]],[["## I could take the same list with a bash command\n\nbecause magics and bash calls return python variables:","_____no_output_____"]],[["names = !ls ../images/ml_demonstrations/*.png\nnames[:5]","_____no_output_____"]],[["## Suppress output of last line\n\nsometimes output isn't needed, so we can either use `pass` instruction on new line or semicolon at the end ","_____no_output_____"],["%conda install matplotlib","_____no_output_____"]],[["%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport 
numpy","_____no_output_____"],["# if you don't put semicolon at the end, you'll have output of function printed\n\nplt.hist(numpy.linspace(0, 1, 1000)**1.5);","_____no_output_____"]],[["# Using different languages inside single notebook\n\n---\n\nIf you're missing those much, using other computational kernels:\n\n- %%python2\n- %%python3\n- %%ruby\n- %%perl\n- %%bash\n- %%R\n\nis possible, but obviously you'll need to setup the corresponding kernel first.","_____no_output_____"]],[["# %%ruby\n# puts 'Hi, this is ruby.'","_____no_output_____"],["%%bash\necho 'Hi, this is bash.'","Hi, this is bash.\n"]],[["## Running R code in Jupyter notebook","_____no_output_____"],["#### Installing R kernel\n\nEasy Option: Installing the R Kernel Using Anaconda\nIf you used Anaconda to set up your environment, getting R working is extremely easy. Just run the below in your terminal:","_____no_output_____"]],[["# %conda install -c r r-essentials","_____no_output_____"]],[["#### Running R and Python in the same notebook.\n\nThe best solution to this is to install rpy2 (requires a working version of R as well), which can be easily done with pip:","_____no_output_____"]],[["%pip install rpy2","Collecting rpy2\n Downloading rpy2-3.3.6.tar.gz (179 kB)\n\u001b[K |████████████████████████████████| 179 kB 465 kB/s eta 0:00:01\n\u001b[31m ERROR: Command errored out with exit status 1:\n command: /Users/squiresrb/opt/anaconda3/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/private/var/folders/d7/8vn6rd1d6f37gtgy_13h3b95mx3fmv/T/pip-install-79ynf4cx/rpy2/setup.py'\"'\"'; __file__='\"'\"'/private/var/folders/d7/8vn6rd1d6f37gtgy_13h3b95mx3fmv/T/pip-install-79ynf4cx/rpy2/setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' egg_info --egg-base /private/var/folders/d7/8vn6rd1d6f37gtgy_13h3b95mx3fmv/T/pip-pip-egg-info-8nhbgt8z\n cwd: /private/var/folders/d7/8vn6rd1d6f37gtgy_13h3b95mx3fmv/T/pip-install-79ynf4cx/rpy2/\n Complete output (2 lines):\n cffi mode: CFFI_MODE.ANY\n Error: rpy2 in API mode cannot be built without R in the PATH or R_HOME defined. Correct this or force ABI mode-only by defining the environment variable RPY2_CFFI_MODE=ABI\n ----------------------------------------\u001b[0m\n\u001b[31mERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.\u001b[0m\n\u001b[?25hNote: you may need to restart the kernel to use updated packages.\n"]],[["You can then use the two languages together, and even pass variables inbetween:","_____no_output_____"]],[["%load_ext rpy2.ipython","_____no_output_____"],["%R require(ggplot2)","_____no_output_____"],["import pandas as pd\ndf = pd.DataFrame({\n 'Letter': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],\n 'X': [4, 3, 5, 2, 1, 7, 7, 5, 9],\n 'Y': [0, 4, 3, 6, 7, 10, 11, 9, 13],\n 'Z': [1, 2, 3, 1, 2, 3, 1, 2, 3]\n })","_____no_output_____"],["%%R -i df\nggplot(data = df) + geom_point(aes(x = X, y= Y, color = Letter, size = Z))","_____no_output_____"]],[["## Writing functions in cython (or fortran)\n\nSometimes the speed of numpy is not enough and I need to write some fast code. 
\nIn principle, you can compile function in the dynamic library and write python wrappers...\n\nBut it is much better when this boring part is done for you, right?\n\nYou can write functions in cython or fortran and use those directly from python code.\n\nFirst you'll need to install:\n```\n%pip install cython \n```","_____no_output_____"]],[["%pip install cython","_____no_output_____"],["%load_ext Cython","_____no_output_____"],["%%cython\ndef myltiply_by_2(float x):\n return 2.0 * x","_____no_output_____"],["myltiply_by_2(23.)","_____no_output_____"]],[["I also should mention that there are different jitter systems which can speed up your python code.\nMore examples in [my notebook](http://arogozhnikov.github.io/2015/09/08/SpeedBenchmarks.html). \n","_____no_output_____"],["For more information see the IPython help at: [Cython](https://github.com/ipython/ipython-in-depth/blob/pycon-2019/6%20-%20Cross-Language-Integration.ipynb)","_____no_output_____"],["# File magic","_____no_output_____"],["%%writefile Export the contents of a cell","_____no_output_____"]],[["%%writefile?","_____no_output_____"]],[["`%pycat` ill output in the pop-up window:\n```\nShow a syntax-highlighted file through a pager.\n\nThis magic is similar to the cat utility, but it will assume the file\nto be Python source and will show it with syntax highlighting.\n\nThis magic command can either take a local filename, an url,\nan history range (see %history) or a macro as argument ::\n\n%pycat myscript.py\n%pycat 7-27\n%pycat myMacro\n%pycat http://www.example.com/myscript.py\n```","_____no_output_____"],["## %load \nloading code directly into cell. You can pick local file or file on the web.\n\nAfter uncommenting the code below and executing, it will replace the content of cell with contents of file.\n","_____no_output_____"]],[["# %load https://matplotlib.org/_downloads/f7171577b84787f4b4d987b663486a94/anatomy.py","_____no_output_____"]],[["## %run to execute python code\n\n%run can execute python code from .py files &mdash; this is a well-documented behavior. \n\nBut it also can execute other jupyter notebooks! Sometimes it is quite useful.\n\nNB. %run is not the same as importing python module.","_____no_output_____"]],[["# this will execute all the code cells from different notebooks\n%run ./matplotlib-anatomy.ipynb","_____no_output_____"]],[["# Using Jupyter more efficiently\n\n---","_____no_output_____"],["## Store Magic - %store: lazy passing data between notebooks","_____no_output_____"],["%store lets you store your macro and use it across all of your Jupyter Notebooks.","_____no_output_____"]],[["data = 'this is the string I want to pass to different notebook'\n%store data\ndel data # deleted variable","_____no_output_____"],["# in second notebook I will use:\n%store -r data\nprint(data)","_____no_output_____"]],[["## %who: analyze variables of global scope","_____no_output_____"]],[["%whos","_____no_output_____"],["# pring names of string variables\n%who str","_____no_output_____"]],[["## Multiple cursors\n\nSince recently jupyter supports multiple cursors (in a single cell), just like sublime ot intelliJ! 
__Alt + mouse selection__ for multiline selection and __Ctrl + mouse clicks__ for multicursors.\n\n\n\nGif taken from http://swanintelligence.com/multi-cursor-in-jupyter.html","_____no_output_____"],["## Timing ","_____no_output_____"],["When you need to measure time spent or find the bottleneck in the code, ipython comes to the rescue.","_____no_output_____"],["%%time\nimport time\ntime.sleep(2) # sleep for two seconds","_____no_output_____"]],[["# measure small code snippets with timeit !\nimport numpy\n%timeit numpy.random.normal(size=100)","_____no_output_____"],["%%writefile pythoncode.py\n\nimport numpy\ndef append_if_not_exists(arr, x):\n if x not in arr:\n arr.append(x)\n \ndef some_useless_slow_function():\n arr = list()\n for i in range(10000):\n x = numpy.random.randint(0, 10000)\n append_if_not_exists(arr, x)","_____no_output_____"],["# shows highlighted source of the newly-created file\n%pycat pythoncode.py","_____no_output_____"],["from pythoncode import some_useless_slow_function, append_if_not_exists","_____no_output_____"]],[["## Hiding code or output","_____no_output_____"],["- Click on the blue vertical bar or line to the left to collapse code or output","_____no_output_____"],["## Commenting and uncommenting a block of code\n\nYou might want to add new lines of code and comment out the old lines while you’re working. This is great if you’re improving the performance of your code or trying to debug it.\n- First, select all the lines you want to comment out.\n- Next hit cmd + / to comment out the highlighted code!","_____no_output_____"],["## Pretty Print all cell outputs\n\nNormally only the last output in the cell will be printed. For everything else, you have to manually add print(), which is fine but not super convenient. You can change that by adding this at the top of the notebook:","_____no_output_____"]],[["from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"","_____no_output_____"]],[["# Profiling: %prun, %lprun, %mprun\n---","_____no_output_____"],["See a much longer explination of profiling and timeing in Jake Vanderplas' Python Data Science Handbook: \nhttps://jakevdp.github.io/PythonDataScienceHandbook/01.07-timing-and-profiling.html","_____no_output_____"]],[["# shows how much time program spent in each function\n%prun some_useless_slow_function()","_____no_output_____"]],[["Example of output:\n```\n26338 function calls in 0.713 seconds\n\n Ordered by: internal time\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 10000 0.684 0.000 0.685 0.000 pythoncode.py:3(append_if_not_exists)\n 10000 0.014 0.000 0.014 0.000 {method 'randint' of 'mtrand.RandomState' objects}\n 1 0.011 0.011 0.713 0.713 pythoncode.py:7(some_useless_slow_function)\n 1 0.003 0.003 0.003 0.003 {range}\n 6334 0.001 0.000 0.001 0.000 {method 'append' of 'list' objects}\n 1 0.000 0.000 0.713 0.713 :1()\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n```","_____no_output_____"]],[["# %load_ext memory_profiler ???","_____no_output_____"],["To profile memory, you can install and run pmrun\n\n# %pip install memory_profiler\n# %pip install line_profiler","_____no_output_____"],["# tracking memory consumption (show in the pop-up)\n# %mprun -f append_if_not_exists some_useless_slow_function()","_____no_output_____"]],[["Example of output:\n```\nLine # Mem usage Increment Line Contents\n================================================\n 3 20.6 MiB 0.0 MiB def append_if_not_exists(arr, x):\n 4 
20.6 MiB 0.0 MiB if x not in arr:\n 5 20.6 MiB 0.0 MiB arr.append(x)\n```","_____no_output_____"],["**%lprun** is line profiling, but it seems to be broken for latest IPython release, so we'll manage without magic this time:","_____no_output_____"],["```python\nimport line_profiler\nlp = line_profiler.LineProfiler()\nlp.add_function(some_useless_slow_function)\nlp.runctx('some_useless_slow_function()', locals=locals(), globals=globals())\nlp.print_stats()\n```","_____no_output_____"],["## Debugging with %debug\n\nJupyter has own interface for [ipdb](https://docs.python.org/2/library/pdb.html). Makes it possible to go inside the function and investigate what happens there.\n\nThis is not pycharm and requires much time to adapt, but when debugging on the server this can be the only option (or use pdb from terminal).","_____no_output_____"]],[["#%%debug filename:line_number_for_breakpoint\n# Here some code that fails. This will activate interactive context for debugging","_____no_output_____"]],[["A bit easier option is `%pdb`, which activates debugger when exception is raised:","_____no_output_____"]],[["# %pdb\n\n# def pick_and_take():\n# picked = numpy.random.randint(0, 1000)\n# raise NotImplementedError()\n \n# pick_and_take()","_____no_output_____"]],[["# Output\n---","_____no_output_____"],["## [RISE](https://github.com/damianavila/RISE): presentations with notebook\n\nExtension by Damian Avila makes it possible to show notebooks as demonstrations. Example of such presentation: http://bollwyvl.github.io/live_reveal/#/7\n\nIt is very useful when you teach others e.g. to use some library.\n","_____no_output_____"],["## Jupyter output system\n\nNotebooks are displayed as HTML and the cell output can be HTML, so you can return virtually anything: video/audio/images. \n\nIn this example I scan the folder with images in my repository and show first five of them:","_____no_output_____"]],[["import os\nfrom IPython.display import display, Image\nnames = [f for f in os.listdir('../images/') if f.endswith('.png')]\nfor name in names[:5]:\n display(Image('../images/' + name, width=300))","_____no_output_____"]],[["## Write your posts in notebooks\n\nLike this one. Use `nbconvert` to export them to html.","_____no_output_____"],["# [Jupyter-contrib extensions](https://github.com/ipython-contrib/jupyter_contrib_nbextensions)\n\nare installed with \n```\n!pip install https://github.com/ipython-contrib/jupyter_contrib_nbextensions/tarball/master\n!pip install jupyter_nbextensions_configurator\n!jupyter contrib nbextension install --user\n!jupyter nbextensions_configurator enable --user\n```\n\n\n\nthis is a family of different extensions, including e.g. **jupyter spell-checker and code-formatter**, \nthat are missing in jupyter by default. ","_____no_output_____"],["## Reconnect to kernel\n\nLong before, when you started some long-taking process and at some point your connection to ipython server dropped, \nyou completely lost the ability to track the computations process (unless you wrote this information to file). 
So either you interrupt the kernel and potentially lose some progress, or you wait till it completes without any idea of what is happening.\n\n`Reconnect to kernel` option now makes it possible to connect again to running kernel without interrupting computations and get the newcoming output shown (but some part of output is already lost).","_____no_output_____"],["# Big data analysis\n\nA number of solutions are available for querying/processing large data samples: \n- [ipyparallel (formerly ipython cluster)](https://github.com/ipython/ipyparallel) is a good option for simple map-reduce operations in python. We use it in [rep](github.com/yandex/rep) to train many machine learning models in parallel\n- [pyspark](http://www.cloudera.com/documentation/enterprise/5-5-x/topics/spark_ipython.html)\n- spark-sql magic [%%sql](https://github.com/jupyter-incubator/sparkmagic)","_____no_output_____"],["Additional Resources:\n\n* IPython [built-in magics](https://ipython.org/ipython-doc/3/interactive/magics.html)\n* Nice [interactive presentation about jupyter](http://quasiben.github.io/dfwmeetup_2014/#/) by Ben Zaitlen\n* Advanced notebooks [part 1: magics](https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/) and [part 2: widgets](https://blog.dominodatalab.com/interactive-dashboards-in-jupyter/)\n* [Profiling in python with jupyter](http://pynash.org/2013/03/06/timing-and-profiling/)\n* [4 ways to extend notebooks](http://mindtrove.info/4-ways-to-extend-jupyter-notebook/)\n* [IPython notebook tricks](https://www.quora.com/What-are-your-favorite-tricks-for-IPython-Notebook)\n* [Jupyter vs Zeppelin for big data](https://www.linkedin.com/pulse/comprehensive-comparison-jupyter-vs-zeppelin-hoc-q-phan-mba-)\n* [Making publication ready Python notebooks](http://blog.juliusschulz.de/blog/ultimate-ipython-notebook).\n* https://yoursdata.net/installing-and-configuring-jupyter-lab-on-windows/","_____no_output_____"]]],"string":"[\n [\n [\n \"# Jupyter (IPython) Advanced Features\\n---\",\n \"_____no_output_____\"\n ],\n [\n \"Outline\\n- Keyboard shortcuts\\n- Magic\\n- Accessing the underlying operating system\\n- Using different languages inside single notebook\\n- File magic\\n- Using Jupyter more efficiently\\n- Profiling\\n- Output\\n- Automation\\n- Extensions\\n- 'Big Data' Analysis\\n \\n\\nSources: [IPython Tutorial](https://github.com/ipython/ipython-in-depth/blob/pycon-2019/1%20-%20Beyond%20Plain%20Python.ipynb), [Dataquest](https://www.dataquest.io/blog/advanced-jupyter-notebooks-tutorial/), and [Dataquest](https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/), [Alex Rogozhnikov Blog](http://arogozhnikov.github.io/2016/09/10/jupyter-features.html) [Toward Data Science](https://towardsdatascience.com/how-to-effortlessly-optimize-jupyter-notebooks-e864162a06ee)\",\n \"_____no_output_____\"\n ],\n [\n \"---\",\n \"_____no_output_____\"\n ],\n [\n \"## Keyboard Shortcuts\\n\\n\\nKeyboard Shortcuts\\n\\nAs in the classic Notebook, you can navigate the user interface through keyboard shortcuts. You can find and customize the current list of keyboard shortcuts by selecting the Advanced Settings Editor item in the Settings menu, then selecting Keyboard Shortcuts in the Settings tab.\\n\\n### Shortcut Keys for Jupyter lab\\n\\nWhile working with any tools, it helps if you know shortcut key to perform most frequent tasks. It increases your productivity and can be very comfortable while working. 
I have listed down some of the shortcuts which I use frequently while working on Jupyter Lab. Hopefully, it will be useful for others too. Also, you can check full list of shortcut by accessing the __commands tab__ in the Jupyter lab. You will find it below the Files on the left hand side.\\n\\n1. **ESC** takes users into command mode view while **ENTER** takes users into cell mode view.\\n2. **A** inserts a cell above the currently selected cell. Before using this, make sure that you are in command mode (by pressing ESC).\\n3. **B** inserts a cell below the currently selected cell. Before using this make sure that you are in command mode (by pressing ESC).\\n4. **D + D** = Pressing D two times in a quick succession in command mode deletes the currently selected cell. \\n5. Jupyter Lab gives you an option to change your cell into Code cell, Markdown cell or Raw Cell. You can use **M** to change current cell to a markdown cell, **Y** to change it to a code cell and  **R** to change it to a raw cell.\\n6. ****CTRL + B**** = Jupyter lab has two columns design. One column is for launcher or code blocks and another column is for file view etc. To increase workspace while writing code, we can close it.  **CTRL + B** is the shortcut for toggling the file view column in the Jupyter lab.\\n7. **SHIFT + M** = It merges multiple selected cells into one cell. \\n8. **CTRL + SHIFT + –** = It splits the current cell into two cells from where your cursor is.\\n9. **SHIFT+J** or **SHIFT + DOWN** = It selects the next cell in a downward direction.  It will help in making multiple selections of cells.\\n10. **SHIFT + K** or **SHIFT + UP** = It selects the next cell in an upwards direction. It will help in making multiple selections of cells.\\n11. **CTRL +** / = It helps you in either commenting or uncommenting any line in the Jupyter lab. For this to work, you don’t even need to select the whole line. It will comment or uncomment line where your cursor is. If you want to do it for more that one line then you will need to first select all the line and then use this shortcut.\\n\\nA PDF!!!\\n- https://blog.ja-ke.tech/2019/01/20/jupyterlab-shortcuts.html\\n- https://github.com/Jakeler/jupyter-shortcuts\",\n \"_____no_output_____\"\n ],\n [\n \"## Magics\\n\\n---\",\n \"_____no_output_____\"\n ],\n [\n \"Magics are turning simple python into *magical python*. Magics are the key to power of ipython.\\n\\nMagic functions are prefixed by % or %%, and typically take their arguments without parentheses, quotes or even commas for convenience. Line magics take a single % and cell magics are prefixed with two %%.\",\n \"_____no_output_____\"\n ],\n [\n \"#### What is Magic??? Information about IPython's 'magic' % functions.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%magic\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#### List available python magics\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%lsmagic\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#### %env\\nYou can manage environment variables of your notebook without restarting the jupyter server process. 
like","value":[["markdown","markdown","markdown","markdown","markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code","code"],["markdown","markdown","markdown","markdown"],["code","code","code","code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code","code"],["markdown"],["code","code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code","code"],["markdown"],["code","code","code","code"],["markdown","markdown","markdown","markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown","markdown","markdown"],["code","code"],["markdown"],["code","code"],["markdown","markdown","markdown","markdown"],["code","code","code","code"],["markdown","markdown","markdown","markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code","code","code"],["markdown","markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown","markdown","markdown"],["code"],["markdown","markdown","markdown","markdown","markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459051,"cells":{"hexsha":{"kind":"string","value":"e7eefbe0e8b565b9cfa0de569f21745e1042f248"},"size":{"kind":"number","value":8740,"string":"8,740"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter 
Notebook"},"max_stars_repo_path":{"kind":"string","value":"Operations_and_Expressions_in_Python.ipynb"},"max_stars_repo_name":{"kind":"string","value":"michaelll22/CPEN-21A-ECE-2-2"},"max_stars_repo_head_hexsha":{"kind":"string","value":"be35c2f8e0ee6446c377bafcc79b03caaab56fe7"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Operations_and_Expressions_in_Python.ipynb"},"max_issues_repo_name":{"kind":"string","value":"michaelll22/CPEN-21A-ECE-2-2"},"max_issues_repo_head_hexsha":{"kind":"string","value":"be35c2f8e0ee6446c377bafcc79b03caaab56fe7"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Operations_and_Expressions_in_Python.ipynb"},"max_forks_repo_name":{"kind":"string","value":"michaelll22/CPEN-21A-ECE-2-2"},"max_forks_repo_head_hexsha":{"kind":"string","value":"be35c2f8e0ee6446c377bafcc79b03caaab56fe7"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":21.4215686275,"string":"21.421569"},"max_line_length":{"kind":"number","value":261,"string":"261"},"alphanum_fraction":{"kind":"number","value":0.371395881,"string":"0.371396"},"cells":{"kind":"list like","value":[[["\"Open","_____no_output_____"],["##Boolean Operators","_____no_output_____"]],[["a = 10\nb = 9\nc = 8\n\nprint (10 > 9)\nprint (10 == 9)\nprint (10 < 9)\n\nprint (a)\n\nprint (a > b)\nc = print (a > b)\n\nc","True\nFalse\nFalse\n10\nTrue\nTrue\n"],["##true\nprint(bool(\"Hello\"))\nprint(bool(15))\nprint(bool(True))\nprint(bool(1))\n\n##false\nprint(bool(False))\nprint(bool(0))\nprint(bool(None))\nprint(bool([]))","True\nTrue\nTrue\nFalse\nTrue\nFalse\nFalse\nFalse\n"],["def myFunction(): \n return True\n\nprint(myFunction())","True\n"],["def myFunction():\n return True\n\nif myFunction():\n print(\"Yes/True\")\nelse:\n print(\"No/Flase\")\n","Yes\n"],["print(10>9)\n\na = 6 #0000 0110\nb = 7 #0000 0111\n\nprint(a == b)\nprint(a != b)","True\nFalse\nTrue\n"]],[["##Python Operators","_____no_output_____"]],[["print(10 + 5)\nprint(10 - 5)\nprint(10 * 5)\nprint(10 / 5)\nprint(10 % 5)\nprint(10 // 3)\nprint(10 ** 2)","15\n5\n50\n2.0\n0\n3\n100\n"]],[["##Bitwise Operators","_____no_output_____"]],[["a = 60 #0011 1100\nb = 13 \n\nprint (a^b)\nprint (~a)\nprint (a<<2)\nprint (a>>2) #0000 1111","49\n-61\n240\n15\n"]],[["##Assignment Operator","_____no_output_____"]],[["x = 2\nx += 3 #Same As x = x+3\nprint(x)\nx","5\n"]],[["##Logical Operators","_____no_output_____"]],[["a = 5\nb = 6\n\nprint(a>b and a==a)\nprint(a\\\"Open\",\n \"_____no_output_____\"\n ],\n [\n \"##Boolean Operators\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"a = 10\\nb = 9\\nc = 8\\n\\nprint (10 > 9)\\nprint (10 == 9)\\nprint (10 < 9)\\n\\nprint (a)\\n\\nprint (a > b)\\nc = print (a > b)\\n\\nc\",\n \"True\\nFalse\\nFalse\\n10\\nTrue\\nTrue\\n\"\n ],\n [\n 
\"##true\\nprint(bool(\\\"Hello\\\"))\\nprint(bool(15))\\nprint(bool(True))\\nprint(bool(1))\\n\\n##false\\nprint(bool(False))\\nprint(bool(0))\\nprint(bool(None))\\nprint(bool([]))\",\n \"True\\nTrue\\nTrue\\nFalse\\nTrue\\nFalse\\nFalse\\nFalse\\n\"\n ],\n [\n \"def myFunction(): \\n return True\\n\\nprint(myFunction())\",\n \"True\\n\"\n ],\n [\n \"def myFunction():\\n return True\\n\\nif myFunction():\\n print(\\\"Yes/True\\\")\\nelse:\\n print(\\\"No/Flase\\\")\\n\",\n \"Yes\\n\"\n ],\n [\n \"print(10>9)\\n\\na = 6 #0000 0110\\nb = 7 #0000 0111\\n\\nprint(a == b)\\nprint(a != b)\",\n \"True\\nFalse\\nTrue\\n\"\n ]\n ],\n [\n [\n \"##Python Operators\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(10 + 5)\\nprint(10 - 5)\\nprint(10 * 5)\\nprint(10 / 5)\\nprint(10 % 5)\\nprint(10 // 3)\\nprint(10 ** 2)\",\n \"15\\n5\\n50\\n2.0\\n0\\n3\\n100\\n\"\n ]\n ],\n [\n [\n \"##Bitwise Operators\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"a = 60 #0011 1100\\nb = 13 \\n\\nprint (a^b)\\nprint (~a)\\nprint (a<<2)\\nprint (a>>2) #0000 1111\",\n \"49\\n-61\\n240\\n15\\n\"\n ]\n ],\n [\n [\n \"##Assignment Operator\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"x = 2\\nx += 3 #Same As x = x+3\\nprint(x)\\nx\",\n \"5\\n\"\n ]\n ],\n [\n [\n \"##Logical Operators\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"a = 5\\nb = 6\\n\\nprint(a>b and a==a)\\nprint(a {(state, action): counts}","_____no_output_____"],["agent_names = ('CustomPlayer1', 'CustomPlayer2')\nagent1 = isolation.Agent(custom.CustomPlayer, agent_names[0])\nagent2 = isolation.Agent(custom.CustomPlayer, agent_names[1])\nagents = (agent1, agent2)\n\nstate = isolation.isolation.Isolation()\ntime_limit = 150\nmatch_id = 0\n\ntic = time.time()\nwinner, game_history, match_id = isolation.play((agents,\n state,\n time_limit,\n match_id))\ntoc = time.time()\nprint('Elapsed time: {}'.format((toc-tic)))","Elapsed time: 11.986933946609497\n"],["root = isolation.isolation.Isolation()\nopening_states = list(b.get_full_states(root, depth=2))\nprint(type(opening_states))\nprint(len(opening_states))","\n9801\n"],["len([s for s in opening_states if s.ply_count==1])","_____no_output_____"],["[s for s in opening_states if s.ply_count==0]","_____no_output_____"],["99*99","_____no_output_____"],["opening_states[0]","_____no_output_____"]],[["### Let's generate the corresponding matches","_____no_output_____"]],[["# Constant parameteres\ntime_limit = 150\ndepth = 4\nfull_search_depth = 2\nmatches_per_opening = 3\n\n# Create the agents that will play\nagent_names = ('CustomPlayer1', 'CustomPlayer2')\nagent1 = isolation.Agent(custom.CustomPlayer, agent_names[0])\nagent2 = isolation.Agent(custom.CustomPlayer, agent_names[1])\nagents = (agent1, agent2)\n\n# Get the initial states\nroot = isolation.isolation.Isolation()\nopening_states = list(b.get_full_states(root, depth=full_search_depth))\n\n# Generate the matches\nmatches = [(agents, state, time_limit, match_id) \n for match_id, state in enumerate(opening_states)]\nmatches = matches * 3\nprint('Generated {} matches.'.format(len(matches)))\n\n# Create or load the book\nbook = b.load_latest_book(depth=depth)","Generated 29403 matches.\n"],["matches[0]","_____no_output_____"],["def active_player(state):\n return state.ply_count % 2","_____no_output_____"],["active_player(matches[0][1])","_____no_output_____"],["batch_size = 10\nx = list(range(10,45))\nbatches = [x[i*batch_size:(i+1)*batch_size] \n for i in range(len(x) // batch_size + (len(x) % batch_size != 
```python
book = b.load_latest_book(depth=4)
type(book)
sum(abs(value) for value in book.values())
# book  # the book maps (state, action) -> win counts
```

A single test game between two copies of the custom player takes roughly 12 seconds:

```python
agent_names = ('CustomPlayer1', 'CustomPlayer2')
agent1 = isolation.Agent(custom.CustomPlayer, agent_names[0])
agent2 = isolation.Agent(custom.CustomPlayer, agent_names[1])
agents = (agent1, agent2)

state = isolation.isolation.Isolation()
time_limit = 150
match_id = 0

tic = time.time()
winner, game_history, match_id = isolation.play((agents,
                                                 state,
                                                 time_limit,
                                                 match_id))
toc = time.time()
print('Elapsed time: {}'.format(toc - tic))  # Elapsed time: 11.986933946609497
```

Enumerating every opening up to depth 2 gives 99 * 99 = 9801 starting states:

```python
root = isolation.isolation.Isolation()
opening_states = list(b.get_full_states(root, depth=2))
print(type(opening_states))  # <class 'list'>
print(len(opening_states))   # 9801

len([s for s in opening_states if s.ply_count == 1])
[s for s in opening_states if s.ply_count == 0]
99 * 99                      # 9801
opening_states[0]
```

### Let's generate the corresponding matches

```python
# Constant parameters
time_limit = 150
depth = 4
full_search_depth = 2
matches_per_opening = 3

# Create the agents that will play
agent_names = ('CustomPlayer1', 'CustomPlayer2')
agent1 = isolation.Agent(custom.CustomPlayer, agent_names[0])
agent2 = isolation.Agent(custom.CustomPlayer, agent_names[1])
agents = (agent1, agent2)

# Get the initial states
root = isolation.isolation.Isolation()
opening_states = list(b.get_full_states(root, depth=full_search_depth))

# Generate the matches (the literal 3 corresponds to matches_per_opening above)
matches = [(agents, state, time_limit, match_id)
           for match_id, state in enumerate(opening_states)]
matches = matches * 3
print('Generated {} matches.'.format(len(matches)))  # Generated 29403 matches.

# Create or load the book
book = b.load_latest_book(depth=depth)
```

A few scratch cells to check the helpers used below:

```python
matches[0]


def active_player(state):
    return state.ply_count % 2


active_player(matches[0][1])

batch_size = 10
x = list(range(10, 45))
batches = [x[i * batch_size:(i + 1) * batch_size]
           for i in range(len(x) // batch_size + (len(x) % batch_size != 0))]
batches

l = [1, 2, 3, 445]
isinstance(l[3], int)  # True
l.insert(0, 45)
l                      # [45, 1, 2, 3, 445]
```

A small test run (only the first 10 matches) plays the games on a thread pool and updates the book after each result:

```python
from multiprocessing.pool import ThreadPool as Pool

num_processes = 1
batch_size = 10

# Small test for debugging
matches = matches[:10]

results = []
pool = Pool(num_processes)
tic = time.time()
for result in pool.imap_unordered(isolation.play, matches):
    results.append(result)
    winner, game_history, match_id = result
    print('Results for match {}: {} wins.'.format(match_id, winner.name))
    _, state, _, _ = matches[match_id]
    if state.locs[1] is not None:
        game_history.insert(0, state.locs[1])
    if state.locs[0] is not None:
        game_history.insert(0, state.locs[0])
    root = isolation.isolation.Isolation()
    print(game_history)
    b.process_game_history(root,
                           game_history,
                           book,
                           agent_names.index(winner.name),
                           active_player=state.ply_count % 2,
                           depth=depth)
toc = time.time()
print('Elapsed time {} seconds.'.format(toc - tic))
```

The recorded output is a long per-match debug log; the visible portion shows matches 0 to 4 being won by CustomPlayer1, CustomPlayer2, CustomPlayer1, CustomPlayer2 and CustomPlayer2 respectively, each followed by its (prefixed) game history and the intermediate states and actions.

```python
sum(abs(value) for value in book.values())

# extrapolating roughly 37 s per 10 test matches to the full set of 29403
seconds = 29403 * 37 / 10
print('{} seconds'.format(seconds))       # 108791.1 seconds
print('{} hours'.format(seconds / 3600))  # 30.21975 hours

game_history
```

## Let's add the symmetry conditions to the game processing

```python
s_a = list(book.keys())[0]
s_a
```

```python
W, H = 11, 9


def h_symmetry(loc):
    """Mirror a board location horizontally (left-right) within its row."""
    if loc is None:
        return None
    row = loc // (W + 2)
    center = W + (row - 1) * (W + 2) + (W + 2) // 2 + 1 if row != 0 else W // 2
    return 2 * center - loc


def c_symmetry(loc):
    """Mirror a board location through the centre of the board."""
    if loc is None:
        return None
    center = (H // 2) * (W + 2) + W // 2
    return 2 * center - loc


def v_symmetry(loc):
    """Mirror a board location vertically (top-bottom) within its column."""
    if loc is None:
        return None
    col = loc % (W + 2)
    center = (H // 2) * (W + 2) + col
    return 2 * center - loc


# quick sanity checks from the notebook (outputs omitted)
h_symmetry(28), h_symmetry(1)
(H // 2) * (W + 2) + W // 2   # the central square
c_symmetry(81), c_symmetry(67)
v_symmetry(2), v_symmetry(28), v_symmetry(48), v_symmetry(86)
```
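Each of these mirrors should be an involution: applying the same symmetry twice must give back the original square, and `None` must map to `None`. A quick property check along those lines (my own sketch, reusing the `W`, `H` and symmetry functions defined above):

```python
# all playable squares of the W x H board, which is padded to a width of W + 2
all_squares = [row * (W + 2) + col for row in range(H) for col in range(W)]

for sym in (h_symmetry, c_symmetry, v_symmetry):
    assert all(sym(sym(loc)) == loc for loc in all_squares), sym.__name__
    assert all(sym(loc) in all_squares for loc in all_squares), sym.__name__
    assert sym(None) is None
print("h, c and v symmetries are involutions that stay on the board")
```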
```python
symmetric = b.sym_sa(s_a, loc_sym=h_symmetry, cardinal_sym=b.cardinal_sym_h)
symmetric
# recorded debug output: 8, 109, 6, 83
```

Printing `isolation.DebugState.from_state(s_a[0])` and `isolation.DebugState.from_state(symmetric[0])` confirms the transformation: the two ASCII board printouts (omitted here) show the original state and its horizontally mirrored counterpart.

The game-processing function is then extended so that, for the opening moves, each result also credits the three symmetric (state, action) pairs:

```python
def process_game_history(state,
                         game_history,
                         book,
                         winner_id,
                         active_player=0,
                         depth=4):
    """Given an initial state and a list of actions, iterate through the
    resulting states and update the win counts in the state/action book,
    also updating the horizontally, vertically and centrally symmetric
    (state, action) pairs for the opening moves."""
    OPENING_MOVES = 2
    game_value = 2 * (active_player == winner_id) - 1
    curr_state = state  # a named tuple, so it is immutable; no need to copy
    for num_action, action in enumerate(game_history):
        if (curr_state, action) in book.keys():
            book[(curr_state, action)] += game_value
            if curr_state.ply_count <= OPENING_MOVES:
                book[b.sym_sa((curr_state, action),
                              loc_sym=h_symmetry,
                              cardinal_sym=b.cardinal_sym_h)] += game_value
                book[b.sym_sa((curr_state, action),
                              loc_sym=v_symmetry,
                              cardinal_sym=b.cardinal_sym_v)] += game_value
                book[b.sym_sa((curr_state, action),
                              loc_sym=c_symmetry,
                              cardinal_sym=b.cardinal_sym_c)] += game_value
        curr_state = curr_state.result(action)
        active_player = 1 - active_player
        game_value = 2 * (active_player == winner_id) - 1
        # break on depth equal to the book's depth
        if num_action >= depth - 1:
            break
```
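For completeness, this symmetry-aware version would slot into the match loop in the same way `b.process_game_history` was called above; a sketch of mine replaying the collected results through it:

```python
for winner, game_history, match_id in results:
    _, start_state, _, _ = matches[match_id]
    full_history = list(game_history)
    # prepend the opening placements already baked into the starting state
    if start_state.locs[1] is not None:
        full_history.insert(0, start_state.locs[1])
    if start_state.locs[0] is not None:
        full_history.insert(0, start_state.locs[0])
    process_game_history(isolation.isolation.Isolation(),
                         full_history,
                         book,
                         agent_names.index(winner.name),
                         active_player=start_state.ply_count % 2,
                         depth=depth)
```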
Notebook"},"max_stars_repo_path":{"kind":"string","value":"docs/notebooks/sdba.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Ouranosinc/dcvar"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0737c66a36f8969e7a17276990bc7e76f7b410c4"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-08-20T16:36:40.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-08-20T16:36:40.000Z"},"max_issues_repo_path":{"kind":"string","value":"docs/notebooks/sdba.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Ouranosinc/dcvar"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0737c66a36f8969e7a17276990bc7e76f7b410c4"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-08-23T13:25:47.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-08-23T15:59:45.000Z"},"max_forks_repo_path":{"kind":"string","value":"docs/notebooks/sdba.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Ouranosinc/dcvar"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0737c66a36f8969e7a17276990bc7e76f7b410c4"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":40.6024930748,"string":"40.602493"},"max_line_length":{"kind":"number","value":701,"string":"701"},"alphanum_fraction":{"kind":"number","value":0.6044004776,"string":"0.6044"},"cells":{"kind":"list like","value":[[["# Statistical Downscaling and Bias-Adjustment\n\n`xclim` provides tools and utilities to ease the bias-adjustement process through its `xclim.sdba` module. Almost all adjustment algorithms conform to the `train` - `adjust` scheme, formalized within `TrainAdjust` classes. Given a reference time series (ref), historical simulations (hist) and simulations to be adjusted (sim), any bias-adjustment method would be applied by first estimating the adjustment factors between the historical simulation and the observations series, and then applying these factors to `sim`, which could be a future simulation.\n\nThis presents examples, while a bit more info and the API are given on [this page](../sdba.rst).\n\nA very simple \"Quantile Mapping\" approach is available through the \"Empirical Quantile Mapping\" object. 
The object is created through the `.train` method of the class, and the simulation is adjusted with `.adjust`.","_____no_output_____"]],[["from __future__ import annotations\n\nimport cftime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\n\n%matplotlib inline\nplt.style.use(\"seaborn\")\nplt.rcParams[\"figure.figsize\"] = (11, 5)\n\n# Create toy data to explore bias adjustment, here fake temperature timeseries\nt = xr.cftime_range(\"2000-01-01\", \"2030-12-31\", freq=\"D\", calendar=\"noleap\")\nref = xr.DataArray(\n (\n -20 * np.cos(2 * np.pi * t.dayofyear / 365)\n + 2 * np.random.random_sample((t.size,))\n + 273.15\n + 0.1 * (t - t[0]).days / 365\n ), # \"warming\" of 1K per decade,\n dims=(\"time\",),\n coords={\"time\": t},\n attrs={\"units\": \"K\"},\n)\nsim = xr.DataArray(\n (\n -18 * np.cos(2 * np.pi * t.dayofyear / 365)\n + 2 * np.random.random_sample((t.size,))\n + 273.15\n + 0.11 * (t - t[0]).days / 365\n ), # \"warming\" of 1.1K per decade\n dims=(\"time\",),\n coords={\"time\": t},\n attrs={\"units\": \"K\"},\n)\n\nref = ref.sel(time=slice(None, \"2015-01-01\"))\nhist = sim.sel(time=slice(None, \"2015-01-01\"))\n\nref.plot(label=\"Reference\")\nsim.plot(label=\"Model\")\nplt.legend()","_____no_output_____"],["from xclim import sdba\n\nQM = sdba.EmpiricalQuantileMapping.train(\n ref, hist, nquantiles=15, group=\"time\", kind=\"+\"\n)\nscen = QM.adjust(sim, extrapolation=\"constant\", interp=\"nearest\")\n\nref.groupby(\"time.dayofyear\").mean().plot(label=\"Reference\")\nhist.groupby(\"time.dayofyear\").mean().plot(label=\"Model - biased\")\nscen.sel(time=slice(\"2000\", \"2015\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2000-15\", linestyle=\"--\"\n)\nscen.sel(time=slice(\"2015\", \"2030\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2015-30\", linestyle=\"--\"\n)\nplt.legend()","_____no_output_____"]],[["In the previous example, a simple Quantile Mapping algorithm was used with 15 quantiles and one group of values. The model performs well, but our toy data is also quite smooth and well-behaved so this is not surprising. A more complex example could have biais distribution varying strongly across months. To perform the adjustment with different factors for each months, one can pass `group='time.month'`. Moreover, to reduce the risk of sharp change in the adjustment at the interface of the months, `interp='linear'` can be passed to `adjust` and the adjustment factors will be interpolated linearly. 
Ex: the factors for the 1st of May will be the average of those for april and those for may.","_____no_output_____"]],[["QM_mo = sdba.EmpiricalQuantileMapping.train(\n ref, hist, nquantiles=15, group=\"time.month\", kind=\"+\"\n)\nscen = QM_mo.adjust(sim, extrapolation=\"constant\", interp=\"linear\")\n\nref.groupby(\"time.dayofyear\").mean().plot(label=\"Reference\")\nhist.groupby(\"time.dayofyear\").mean().plot(label=\"Model - biased\")\nscen.sel(time=slice(\"2000\", \"2015\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2000-15\", linestyle=\"--\"\n)\nscen.sel(time=slice(\"2015\", \"2030\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2015-30\", linestyle=\"--\"\n)\nplt.legend()","_____no_output_____"]],[["The training data (here the adjustment factors) is available for inspection in the `ds` attribute of the adjustment object.","_____no_output_____"]],[["QM_mo.ds","_____no_output_____"],["QM_mo.ds.af.plot()","_____no_output_____"]],[["## Grouping\n\nFor basic time period grouping (months, day of year, season), passing a string to the methods needing it is sufficient. Most methods acting on grouped data also accept a `window` int argument to pad the groups with data from adjacent ones. Units of `window` are the sampling frequency of the main grouping dimension (usually `time`). For more complex grouping, or simply for clarity, one can pass a `xclim.sdba.base.Grouper` directly.\n\nExample here with another, simpler, adjustment method. Here we want `sim` to be scaled so that its mean fits the one of `ref`. Scaling factors are to be computed separately for each day of the year, but including 15 days on either side of the day. This means that the factor for the 1st of May is computed including all values from the 16th of April to the 15th of May (of all years).","_____no_output_____"]],[["group = sdba.Grouper(\"time.dayofyear\", window=31)\nQM_doy = sdba.Scaling.train(ref, hist, group=group, kind=\"+\")\nscen = QM_doy.adjust(sim)\n\nref.groupby(\"time.dayofyear\").mean().plot(label=\"Reference\")\nhist.groupby(\"time.dayofyear\").mean().plot(label=\"Model - biased\")\nscen.sel(time=slice(\"2000\", \"2015\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2000-15\", linestyle=\"--\"\n)\nscen.sel(time=slice(\"2015\", \"2030\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2015-30\", linestyle=\"--\"\n)\nplt.legend()","_____no_output_____"],["sim","_____no_output_____"],["QM_doy.ds.af.plot()","_____no_output_____"]],[["## Modular approach\n\nThe `sdba` module adopts a modular approach instead of implementing published and named methods directly.\nA generic bias adjustment process is laid out as follows:\n\n- preprocessing on `ref`, `hist` and `sim` (using methods in `xclim.sdba.processing` or `xclim.sdba.detrending`)\n- creating and training the adjustment object `Adj = Adjustment.train(obs, hist, **kwargs)` (from `xclim.sdba.adjustment`)\n- adjustment `scen = Adj.adjust(sim, **kwargs)`\n- post-processing on `scen` (for example: re-trending)\n\nThe train-adjust approach allows to inspect the trained adjustment object. The training information is stored in the underlying `Adj.ds` dataset and often has a `af` variable with the adjustment factors. 
Its layout and the other available variables vary between the different algorithm, refer to their part of the API docs.\n\nFor heavy processing, this separation allows the computation and writing to disk of the training dataset before performing the adjustment(s). See the [advanced notebook](sdba-advanced.ipynb).\n\nParameters needed by the training and the adjustment are saved to the `Adj.ds` dataset as a `adj_params` attribute. Other parameters, those only needed by the adjustment are passed in the `adjust` call and written to the history attribute in the output scenario dataarray.\n\n### First example : pr and frequency adaptation\n\nThe next example generates fake precipitation data and adjusts the `sim` timeseries but also adds a step where the dry-day frequency of `hist` is adapted so that is fits the one of `ref`. This ensures well-behaved adjustment factors for the smaller quantiles. Note also that we are passing `kind='*'` to use the multiplicative mode. Adjustment factors will be multiplied/divided instead of being added/substracted.","_____no_output_____"]],[["vals = np.random.randint(0, 1000, size=(t.size,)) / 100\nvals_ref = (4 ** np.where(vals < 9, vals / 100, vals)) / 3e6\nvals_sim = (\n (1 + 0.1 * np.random.random_sample((t.size,)))\n * (4 ** np.where(vals < 9.5, vals / 100, vals))\n / 3e6\n)\n\npr_ref = xr.DataArray(\n vals_ref, coords={\"time\": t}, dims=(\"time\",), attrs={\"units\": \"mm/day\"}\n)\npr_ref = pr_ref.sel(time=slice(\"2000\", \"2015\"))\npr_sim = xr.DataArray(\n vals_sim, coords={\"time\": t}, dims=(\"time\",), attrs={\"units\": \"mm/day\"}\n)\npr_hist = pr_sim.sel(time=slice(\"2000\", \"2015\"))\n\npr_ref.plot(alpha=0.9, label=\"Reference\")\npr_sim.plot(alpha=0.7, label=\"Model\")\nplt.legend()","_____no_output_____"],["# 1st try without adapt_freq\nQM = sdba.EmpiricalQuantileMapping.train(\n pr_ref, pr_hist, nquantiles=15, kind=\"*\", group=\"time\"\n)\nscen = QM.adjust(pr_sim)\n\npr_ref.sel(time=\"2010\").plot(alpha=0.9, label=\"Reference\")\npr_hist.sel(time=\"2010\").plot(alpha=0.7, label=\"Model - biased\")\nscen.sel(time=\"2010\").plot(alpha=0.6, label=\"Model - adjusted\")\nplt.legend()","_____no_output_____"]],[["In the figure above, `scen` has small peaks where `sim` is 0. This problem originates from the fact that there are more \"dry days\" (days with almost no precipitation) in `hist` than in `ref`. The next example works around the problem using frequency-adaptation, as described in [Themeßl et al. (2010)](https://doi.org/10.1007/s10584-011-0224-4).","_____no_output_____"]],[["# 2nd try with adapt_freq\nsim_ad, pth, dP0 = sdba.processing.adapt_freq(\n pr_ref, pr_sim, thresh=\"0.05 mm d-1\", group=\"time\"\n)\nQM_ad = sdba.EmpiricalQuantileMapping.train(\n pr_ref, sim_ad, nquantiles=15, kind=\"*\", group=\"time\"\n)\nscen_ad = QM_ad.adjust(pr_sim)\n\npr_ref.sel(time=\"2010\").plot(alpha=0.9, label=\"Reference\")\npr_sim.sel(time=\"2010\").plot(alpha=0.7, label=\"Model - biased\")\nscen_ad.sel(time=\"2010\").plot(alpha=0.6, label=\"Model - adjusted\")\nplt.legend()","_____no_output_____"]],[["### Second example: tas and detrending\n\nThe next example reuses the fake temperature timeseries generated at the beginning and applies the same QM adjustment method. However, for a better adjustment, we will scale sim to ref and then detrend the series, assuming the trend is linear. 
When `sim` (or `sim_scl`) is detrended, its values are now anomalies, so we need to normalize `ref` and `hist` so we can compare similar values.\n\nThis process is detailed here to show how the sdba module should be used in custom adjustment processes, but this specific method also exists as `sdba.DetrendedQuantileMapping` and is based on [Cannon et al. 2015](https://doi.org/10.1175/JCLI-D-14-00754.1). However, `DetrendedQuantileMapping` normalizes over a `time.dayofyear` group, regardless of what is passed in the `group` argument. As done here, it is anyway recommended to use `dayofyear` groups when normalizing, especially for variables with strong seasonal variations.","_____no_output_____"]],[["doy_win31 = sdba.Grouper(\"time.dayofyear\", window=15)\nSca = sdba.Scaling.train(ref, hist, group=doy_win31, kind=\"+\")\nsim_scl = Sca.adjust(sim)\n\ndetrender = sdba.detrending.PolyDetrend(degree=1, group=\"time.dayofyear\", kind=\"+\")\nsim_fit = detrender.fit(sim_scl)\nsim_detrended = sim_fit.detrend(sim_scl)\n\nref_n, _ = sdba.processing.normalize(ref, group=doy_win31, kind=\"+\")\nhist_n, _ = sdba.processing.normalize(hist, group=doy_win31, kind=\"+\")\n\nQM = sdba.EmpiricalQuantileMapping.train(\n ref_n, hist_n, nquantiles=15, group=\"time.month\", kind=\"+\"\n)\nscen_detrended = QM.adjust(sim_detrended, extrapolation=\"constant\", interp=\"nearest\")\nscen = sim_fit.retrend(scen_detrended)\n\n\nref.groupby(\"time.dayofyear\").mean().plot(label=\"Reference\")\nsim.groupby(\"time.dayofyear\").mean().plot(label=\"Model - biased\")\nscen.sel(time=slice(\"2000\", \"2015\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2000-15\", linestyle=\"--\"\n)\nscen.sel(time=slice(\"2015\", \"2030\")).groupby(\"time.dayofyear\").mean().plot(\n label=\"Model - adjusted - 2015-30\", linestyle=\"--\"\n)\nplt.legend()","_____no_output_____"]],[["### Third example : Multi-method protocol - Hnilica et al. 2017\nIn [their paper of 2017](https://doi.org/10.1002/joc.4890), Hnilica, Hanel and Puš present a bias-adjustment method based on the principles of Principal Components Analysis. The idea is simple : use principal components to define coordinates on the reference and on the simulation and then transform the simulation data from the latter to the former. Spatial correlation can thus be conserved by taking different points as the dimensions of the transform space. The method was demonstrated in the article by bias-adjusting precipitation over different drainage basins.\n\nThe same method could be used for multivariate adjustment. The principle would be the same, concatening the different variables into a single dataset along a new dimension. An example is given in the [advanced notebook](sdba-advanced.ipynb).\n\nHere we show how the modularity of `xclim.sdba` can be used to construct a quite complex adjustment protocol involving two adjustment methods : quantile mapping and principal components. Evidently, as this example uses only 2 years of data, it is not complete. 
It is meant to show how the adjustment functions and how the API can be used.","_____no_output_____"]],[["# We are using xarray's \"air_temperature\" dataset\nds = xr.tutorial.open_dataset(\"air_temperature\")","_____no_output_____"],["# To get an exagerated example we select different points\n# here \"lon\" will be our dimension of two \"spatially correlated\" points\nreft = ds.air.isel(lat=21, lon=[40, 52]).drop_vars([\"lon\", \"lat\"])\nsimt = ds.air.isel(lat=18, lon=[17, 35]).drop_vars([\"lon\", \"lat\"])\n\n# Principal Components Adj, no grouping and use \"lon\" as the space dimensions\nPCA = sdba.PrincipalComponents.train(reft, simt, group=\"time\", crd_dim=\"lon\")\nscen1 = PCA.adjust(simt)\n\n# QM, no grouping, 20 quantiles and additive adjustment\nEQM = sdba.EmpiricalQuantileMapping.train(\n reft, scen1, group=\"time\", nquantiles=50, kind=\"+\"\n)\nscen2 = EQM.adjust(scen1)","_____no_output_____"],["# some Analysis figures\nfig = plt.figure(figsize=(12, 16))\ngs = plt.matplotlib.gridspec.GridSpec(3, 2, fig)\n\naxPCA = plt.subplot(gs[0, :])\naxPCA.scatter(reft.isel(lon=0), reft.isel(lon=1), s=20, label=\"Reference\")\naxPCA.scatter(simt.isel(lon=0), simt.isel(lon=1), s=10, label=\"Simulation\")\naxPCA.scatter(scen2.isel(lon=0), scen2.isel(lon=1), s=3, label=\"Adjusted - PCA+EQM\")\naxPCA.set_xlabel(\"Point 1\")\naxPCA.set_ylabel(\"Point 2\")\naxPCA.set_title(\"PC-space\")\naxPCA.legend()\n\nrefQ = reft.quantile(EQM.ds.quantiles, dim=\"time\")\nsimQ = simt.quantile(EQM.ds.quantiles, dim=\"time\")\nscen1Q = scen1.quantile(EQM.ds.quantiles, dim=\"time\")\nscen2Q = scen2.quantile(EQM.ds.quantiles, dim=\"time\")\nfor i in range(2):\n if i == 0:\n axQM = plt.subplot(gs[1, 0])\n else:\n axQM = plt.subplot(gs[1, 1], sharey=axQM)\n axQM.plot(refQ.isel(lon=i), simQ.isel(lon=i), label=\"No adj\")\n axQM.plot(refQ.isel(lon=i), scen1Q.isel(lon=i), label=\"PCA\")\n axQM.plot(refQ.isel(lon=i), scen2Q.isel(lon=i), label=\"PCA+EQM\")\n axQM.plot(\n refQ.isel(lon=i), refQ.isel(lon=i), color=\"k\", linestyle=\":\", label=\"Ideal\"\n )\n axQM.set_title(f\"QQ plot - Point {i + 1}\")\n axQM.set_xlabel(\"Reference\")\n axQM.set_xlabel(\"Model\")\n axQM.legend()\n\naxT = plt.subplot(gs[2, :])\nreft.isel(lon=0).plot(ax=axT, label=\"Reference\")\nsimt.isel(lon=0).plot(ax=axT, label=\"Unadjusted sim\")\n# scen1.isel(lon=0).plot(ax=axT, label='PCA only')\nscen2.isel(lon=0).plot(ax=axT, label=\"PCA+EQM\")\naxT.legend()\naxT.set_title(\"Timeseries - Point 1\")","_____no_output_____"]],[["### Fourth example : Multivariate bias-adjustment with multiple steps - Cannon 2018\n\nThis section replicates the \"MBCn\" algorithm described by [Cannon (2018)](https://doi.org/10.1007/s00382-017-3580-6). The method relies on some univariate algorithm, an adaption of the N-pdf transform of [Pitié et al. 
(2005)](https://ieeexplore.ieee.org/document/1544887/) and a final reordering step.\n\nIn the following, we use the AHCCD and CanESM2 data are reference and simulation and we correct both `pr` and `tasmax` together.","_____no_output_____"]],[["from xclim.core.units import convert_units_to\nfrom xclim.testing import open_dataset\n\ndref = open_dataset(\n \"sdba/ahccd_1950-2013.nc\", chunks={\"location\": 1}, drop_variables=[\"lat\", \"lon\"]\n).sel(time=slice(\"1981\", \"2010\"))\ndref = dref.assign(\n tasmax=convert_units_to(dref.tasmax, \"K\"),\n pr=convert_units_to(dref.pr, \"kg m-2 s-1\"),\n)\ndsim = open_dataset(\n \"sdba/CanESM2_1950-2100.nc\", chunks={\"location\": 1}, drop_variables=[\"lat\", \"lon\"]\n)\n\ndhist = dsim.sel(time=slice(\"1981\", \"2010\"))\ndsim = dsim.sel(time=slice(\"2041\", \"2070\"))\ndref","_____no_output_____"]],[["##### Perform an initial univariate adjustment.","_____no_output_____"]],[["# additive for tasmax\nQDMtx = sdba.QuantileDeltaMapping.train(\n dref.tasmax, dhist.tasmax, nquantiles=20, kind=\"+\", group=\"time\"\n)\n# Adjust both hist and sim, we'll feed both to the Npdf transform.\nscenh_tx = QDMtx.adjust(dhist.tasmax)\nscens_tx = QDMtx.adjust(dsim.tasmax)\n\n# remove == 0 values in pr:\ndref[\"pr\"] = sdba.processing.jitter_under_thresh(dref.pr, \"0.01 mm d-1\")\ndhist[\"pr\"] = sdba.processing.jitter_under_thresh(dhist.pr, \"0.01 mm d-1\")\ndsim[\"pr\"] = sdba.processing.jitter_under_thresh(dsim.pr, \"0.01 mm d-1\")\n\n# multiplicative for pr\nQDMpr = sdba.QuantileDeltaMapping.train(\n dref.pr, dhist.pr, nquantiles=20, kind=\"*\", group=\"time\"\n)\n# Adjust both hist and sim, we'll feed both to the Npdf transform.\nscenh_pr = QDMpr.adjust(dhist.pr)\nscens_pr = QDMpr.adjust(dsim.pr)\n\nscenh = xr.Dataset(dict(tasmax=scenh_tx, pr=scenh_pr))\nscens = xr.Dataset(dict(tasmax=scens_tx, pr=scens_pr))","_____no_output_____"]],[["##### Stack the variables to multivariate arrays and standardize them\nThe standardization process ensure the mean and standard deviation of each column (variable) is 0 and 1 respectively.\n\n`hist` and `sim` are standardized together so the two series are coherent. We keep the mean and standard deviation to be reused when we build the result.","_____no_output_____"]],[["# Stack the variables (tasmax and pr)\nref = sdba.processing.stack_variables(dref)\nscenh = sdba.processing.stack_variables(scenh)\nscens = sdba.processing.stack_variables(scens)\n\n# Standardize\nref, _, _ = sdba.processing.standardize(ref)\n\nallsim, savg, sstd = sdba.processing.standardize(xr.concat((scenh, scens), \"time\"))\nhist = allsim.sel(time=scenh.time)\nsim = allsim.sel(time=scens.time)","_____no_output_____"]],[["##### Perform the N-dimensional probability density function transform\n\nThe NpdfTransform will iteratively randomly rotate our arrays in the \"variables\" space and apply the univariate adjustment before rotating it back. In Cannon (2018) and Pitié et al. 
(2005), it can be seen that the source array's joint distribution converges toward the target's joint distribution when a large number of iterations is done.","_____no_output_____"]],[["from xclim import set_options\n\n# See the advanced notebook for details on how this option work\nwith set_options(sdba_extra_output=True):\n out = sdba.adjustment.NpdfTransform.adjust(\n ref,\n hist,\n sim,\n base=sdba.QuantileDeltaMapping, # Use QDM as the univariate adjustment.\n base_kws={\"nquantiles\": 20, \"group\": \"time\"},\n n_iter=20, # perform 20 iteration\n n_escore=1000, # only send 1000 points to the escore metric (it is realy slow)\n )\n\nscenh = out.scenh.rename(time_hist=\"time\") # Bias-adjusted historical period\nscens = out.scen # Bias-adjusted future period\nextra = out.drop_vars([\"scenh\", \"scen\"])\n\n# Un-standardize (add the mean and the std back)\nscenh = sdba.processing.unstandardize(scenh, savg, sstd)\nscens = sdba.processing.unstandardize(scens, savg, sstd)","_____no_output_____"]],[["##### Restoring the trend\n\nThe NpdfT has given us new \"hist\" and \"sim\" arrays with a correct rank structure. However, the trend is lost in this process. We reorder the result of the initial adjustment according to the rank structure of the NpdfT outputs to get our final bias-adjusted series.\n\n`sdba.processing.reordering` : 'ref' the argument that provides the order, 'sim' is the argument to reorder.","_____no_output_____"]],[["scenh = sdba.processing.reordering(hist, scenh, group=\"time\")\nscens = sdba.processing.reordering(sim, scens, group=\"time\")","_____no_output_____"],["scenh = sdba.processing.unstack_variables(scenh)\nscens = sdba.processing.unstack_variables(scens)","_____no_output_____"]],[["##### There we are!\n\nLet's trigger all the computations. Here we write the data to disk and use `compute=False` in order to trigger the whole computation tree only once. There seems to be no way in xarray to do the same with a `load` call.","_____no_output_____"]],[["from dask import compute\nfrom dask.diagnostics import ProgressBar\n\ntasks = [\n scenh.isel(location=2).to_netcdf(\"mbcn_scen_hist_loc2.nc\", compute=False),\n scens.isel(location=2).to_netcdf(\"mbcn_scen_sim_loc2.nc\", compute=False),\n extra.escores.isel(location=2)\n .to_dataset()\n .to_netcdf(\"mbcn_escores_loc2.nc\", compute=False),\n]\n\nwith ProgressBar():\n compute(tasks)","_____no_output_____"]],[["Let's compare the series and look at the distance scores to see how well the Npdf transform has converged.","_____no_output_____"]],[["scenh = xr.open_dataset(\"mbcn_scen_hist_loc2.nc\")\n\nfig, ax = plt.subplots()\n\ndref.isel(location=2).tasmax.plot(ax=ax, label=\"Reference\")\nscenh.tasmax.plot(ax=ax, label=\"Adjusted\", alpha=0.65)\ndhist.isel(location=2).tasmax.plot(ax=ax, label=\"Simulated\")\n\nax.legend()","_____no_output_____"],["escores = xr.open_dataarray(\"mbcn_escores_loc2.nc\")\ndiff_escore = escores.differentiate(\"iterations\")\ndiff_escore.plot()\nplt.title(\"Difference of the subsequent e-scores.\")\nplt.ylabel(\"E-scores difference\")","_____no_output_____"],["diff_escore","_____no_output_____"]],[["The tutorial continues in the [advanced notebook](sdba-advanced.ipynb) with more on optimization with dask, other fancier detrending algorithms and an example pipeline for heavy processing.","_____no_output_____"]]],"string":"[\n [\n [\n \"# Statistical Downscaling and Bias-Adjustment\\n\\n`xclim` provides tools and utilities to ease the bias-adjustement process through its `xclim.sdba` module. 
Almost all adjustment algorithms conform to the `train` - `adjust` scheme, formalized within `TrainAdjust` classes. Given a reference time series (ref), historical simulations (hist) and simulations to be adjusted (sim), any bias-adjustment method would be applied by first estimating the adjustment factors between the historical simulation and the observations series, and then applying these factors to `sim`, which could be a future simulation.\\n\\nThis presents examples, while a bit more info and the API are given on [this page](../sdba.rst).\\n\\nA very simple \\\"Quantile Mapping\\\" approach is available through the \\\"Empirical Quantile Mapping\\\" object. The object is created through the `.train` method of the class, and the simulation is adjusted with `.adjust`.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from __future__ import annotations\\n\\nimport cftime\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\nimport xarray as xr\\n\\n%matplotlib inline\\nplt.style.use(\\\"seaborn\\\")\\nplt.rcParams[\\\"figure.figsize\\\"] = (11, 5)\\n\\n# Create toy data to explore bias adjustment, here fake temperature timeseries\\nt = xr.cftime_range(\\\"2000-01-01\\\", \\\"2030-12-31\\\", freq=\\\"D\\\", calendar=\\\"noleap\\\")\\nref = xr.DataArray(\\n (\\n -20 * np.cos(2 * np.pi * t.dayofyear / 365)\\n + 2 * np.random.random_sample((t.size,))\\n + 273.15\\n + 0.1 * (t - t[0]).days / 365\\n ), # \\\"warming\\\" of 1K per decade,\\n dims=(\\\"time\\\",),\\n coords={\\\"time\\\": t},\\n attrs={\\\"units\\\": \\\"K\\\"},\\n)\\nsim = xr.DataArray(\\n (\\n -18 * np.cos(2 * np.pi * t.dayofyear / 365)\\n + 2 * np.random.random_sample((t.size,))\\n + 273.15\\n + 0.11 * (t - t[0]).days / 365\\n ), # \\\"warming\\\" of 1.1K per decade\\n dims=(\\\"time\\\",),\\n coords={\\\"time\\\": t},\\n attrs={\\\"units\\\": \\\"K\\\"},\\n)\\n\\nref = ref.sel(time=slice(None, \\\"2015-01-01\\\"))\\nhist = sim.sel(time=slice(None, \\\"2015-01-01\\\"))\\n\\nref.plot(label=\\\"Reference\\\")\\nsim.plot(label=\\\"Model\\\")\\nplt.legend()\",\n \"_____no_output_____\"\n ],\n [\n \"from xclim import sdba\\n\\nQM = sdba.EmpiricalQuantileMapping.train(\\n ref, hist, nquantiles=15, group=\\\"time\\\", kind=\\\"+\\\"\\n)\\nscen = QM.adjust(sim, extrapolation=\\\"constant\\\", interp=\\\"nearest\\\")\\n\\nref.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Reference\\\")\\nhist.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Model - biased\\\")\\nscen.sel(time=slice(\\\"2000\\\", \\\"2015\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2000-15\\\", linestyle=\\\"--\\\"\\n)\\nscen.sel(time=slice(\\\"2015\\\", \\\"2030\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2015-30\\\", linestyle=\\\"--\\\"\\n)\\nplt.legend()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In the previous example, a simple Quantile Mapping algorithm was used with 15 quantiles and one group of values. The model performs well, but our toy data is also quite smooth and well-behaved so this is not surprising. A more complex example could have biais distribution varying strongly across months. To perform the adjustment with different factors for each months, one can pass `group='time.month'`. Moreover, to reduce the risk of sharp change in the adjustment at the interface of the months, `interp='linear'` can be passed to `adjust` and the adjustment factors will be interpolated linearly. 
Ex: the factors for the 1st of May will be the average of those for april and those for may.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"QM_mo = sdba.EmpiricalQuantileMapping.train(\\n ref, hist, nquantiles=15, group=\\\"time.month\\\", kind=\\\"+\\\"\\n)\\nscen = QM_mo.adjust(sim, extrapolation=\\\"constant\\\", interp=\\\"linear\\\")\\n\\nref.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Reference\\\")\\nhist.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Model - biased\\\")\\nscen.sel(time=slice(\\\"2000\\\", \\\"2015\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2000-15\\\", linestyle=\\\"--\\\"\\n)\\nscen.sel(time=slice(\\\"2015\\\", \\\"2030\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2015-30\\\", linestyle=\\\"--\\\"\\n)\\nplt.legend()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The training data (here the adjustment factors) is available for inspection in the `ds` attribute of the adjustment object.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"QM_mo.ds\",\n \"_____no_output_____\"\n ],\n [\n \"QM_mo.ds.af.plot()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Grouping\\n\\nFor basic time period grouping (months, day of year, season), passing a string to the methods needing it is sufficient. Most methods acting on grouped data also accept a `window` int argument to pad the groups with data from adjacent ones. Units of `window` are the sampling frequency of the main grouping dimension (usually `time`). For more complex grouping, or simply for clarity, one can pass a `xclim.sdba.base.Grouper` directly.\\n\\nExample here with another, simpler, adjustment method. Here we want `sim` to be scaled so that its mean fits the one of `ref`. Scaling factors are to be computed separately for each day of the year, but including 15 days on either side of the day. This means that the factor for the 1st of May is computed including all values from the 16th of April to the 15th of May (of all years).\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"group = sdba.Grouper(\\\"time.dayofyear\\\", window=31)\\nQM_doy = sdba.Scaling.train(ref, hist, group=group, kind=\\\"+\\\")\\nscen = QM_doy.adjust(sim)\\n\\nref.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Reference\\\")\\nhist.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Model - biased\\\")\\nscen.sel(time=slice(\\\"2000\\\", \\\"2015\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2000-15\\\", linestyle=\\\"--\\\"\\n)\\nscen.sel(time=slice(\\\"2015\\\", \\\"2030\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2015-30\\\", linestyle=\\\"--\\\"\\n)\\nplt.legend()\",\n \"_____no_output_____\"\n ],\n [\n \"sim\",\n \"_____no_output_____\"\n ],\n [\n \"QM_doy.ds.af.plot()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Modular approach\\n\\nThe `sdba` module adopts a modular approach instead of implementing published and named methods directly.\\nA generic bias adjustment process is laid out as follows:\\n\\n- preprocessing on `ref`, `hist` and `sim` (using methods in `xclim.sdba.processing` or `xclim.sdba.detrending`)\\n- creating and training the adjustment object `Adj = Adjustment.train(obs, hist, **kwargs)` (from `xclim.sdba.adjustment`)\\n- adjustment `scen = Adj.adjust(sim, **kwargs)`\\n- post-processing on `scen` (for example: re-trending)\\n\\nThe train-adjust approach allows to inspect the trained adjustment object. 
The training information is stored in the underlying `Adj.ds` dataset and often has a `af` variable with the adjustment factors. Its layout and the other available variables vary between the different algorithm, refer to their part of the API docs.\\n\\nFor heavy processing, this separation allows the computation and writing to disk of the training dataset before performing the adjustment(s). See the [advanced notebook](sdba-advanced.ipynb).\\n\\nParameters needed by the training and the adjustment are saved to the `Adj.ds` dataset as a `adj_params` attribute. Other parameters, those only needed by the adjustment are passed in the `adjust` call and written to the history attribute in the output scenario dataarray.\\n\\n### First example : pr and frequency adaptation\\n\\nThe next example generates fake precipitation data and adjusts the `sim` timeseries but also adds a step where the dry-day frequency of `hist` is adapted so that is fits the one of `ref`. This ensures well-behaved adjustment factors for the smaller quantiles. Note also that we are passing `kind='*'` to use the multiplicative mode. Adjustment factors will be multiplied/divided instead of being added/substracted.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"vals = np.random.randint(0, 1000, size=(t.size,)) / 100\\nvals_ref = (4 ** np.where(vals < 9, vals / 100, vals)) / 3e6\\nvals_sim = (\\n (1 + 0.1 * np.random.random_sample((t.size,)))\\n * (4 ** np.where(vals < 9.5, vals / 100, vals))\\n / 3e6\\n)\\n\\npr_ref = xr.DataArray(\\n vals_ref, coords={\\\"time\\\": t}, dims=(\\\"time\\\",), attrs={\\\"units\\\": \\\"mm/day\\\"}\\n)\\npr_ref = pr_ref.sel(time=slice(\\\"2000\\\", \\\"2015\\\"))\\npr_sim = xr.DataArray(\\n vals_sim, coords={\\\"time\\\": t}, dims=(\\\"time\\\",), attrs={\\\"units\\\": \\\"mm/day\\\"}\\n)\\npr_hist = pr_sim.sel(time=slice(\\\"2000\\\", \\\"2015\\\"))\\n\\npr_ref.plot(alpha=0.9, label=\\\"Reference\\\")\\npr_sim.plot(alpha=0.7, label=\\\"Model\\\")\\nplt.legend()\",\n \"_____no_output_____\"\n ],\n [\n \"# 1st try without adapt_freq\\nQM = sdba.EmpiricalQuantileMapping.train(\\n pr_ref, pr_hist, nquantiles=15, kind=\\\"*\\\", group=\\\"time\\\"\\n)\\nscen = QM.adjust(pr_sim)\\n\\npr_ref.sel(time=\\\"2010\\\").plot(alpha=0.9, label=\\\"Reference\\\")\\npr_hist.sel(time=\\\"2010\\\").plot(alpha=0.7, label=\\\"Model - biased\\\")\\nscen.sel(time=\\\"2010\\\").plot(alpha=0.6, label=\\\"Model - adjusted\\\")\\nplt.legend()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In the figure above, `scen` has small peaks where `sim` is 0. This problem originates from the fact that there are more \\\"dry days\\\" (days with almost no precipitation) in `hist` than in `ref`. The next example works around the problem using frequency-adaptation, as described in [Themeßl et al. 
(2010)](https://doi.org/10.1007/s10584-011-0224-4).\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 2nd try with adapt_freq\\nsim_ad, pth, dP0 = sdba.processing.adapt_freq(\\n pr_ref, pr_sim, thresh=\\\"0.05 mm d-1\\\", group=\\\"time\\\"\\n)\\nQM_ad = sdba.EmpiricalQuantileMapping.train(\\n pr_ref, sim_ad, nquantiles=15, kind=\\\"*\\\", group=\\\"time\\\"\\n)\\nscen_ad = QM_ad.adjust(pr_sim)\\n\\npr_ref.sel(time=\\\"2010\\\").plot(alpha=0.9, label=\\\"Reference\\\")\\npr_sim.sel(time=\\\"2010\\\").plot(alpha=0.7, label=\\\"Model - biased\\\")\\nscen_ad.sel(time=\\\"2010\\\").plot(alpha=0.6, label=\\\"Model - adjusted\\\")\\nplt.legend()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Second example: tas and detrending\\n\\nThe next example reuses the fake temperature timeseries generated at the beginning and applies the same QM adjustment method. However, for a better adjustment, we will scale sim to ref and then detrend the series, assuming the trend is linear. When `sim` (or `sim_scl`) is detrended, its values are now anomalies, so we need to normalize `ref` and `hist` so we can compare similar values.\\n\\nThis process is detailed here to show how the sdba module should be used in custom adjustment processes, but this specific method also exists as `sdba.DetrendedQuantileMapping` and is based on [Cannon et al. 2015](https://doi.org/10.1175/JCLI-D-14-00754.1). However, `DetrendedQuantileMapping` normalizes over a `time.dayofyear` group, regardless of what is passed in the `group` argument. As done here, it is anyway recommended to use `dayofyear` groups when normalizing, especially for variables with strong seasonal variations.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"doy_win31 = sdba.Grouper(\\\"time.dayofyear\\\", window=15)\\nSca = sdba.Scaling.train(ref, hist, group=doy_win31, kind=\\\"+\\\")\\nsim_scl = Sca.adjust(sim)\\n\\ndetrender = sdba.detrending.PolyDetrend(degree=1, group=\\\"time.dayofyear\\\", kind=\\\"+\\\")\\nsim_fit = detrender.fit(sim_scl)\\nsim_detrended = sim_fit.detrend(sim_scl)\\n\\nref_n, _ = sdba.processing.normalize(ref, group=doy_win31, kind=\\\"+\\\")\\nhist_n, _ = sdba.processing.normalize(hist, group=doy_win31, kind=\\\"+\\\")\\n\\nQM = sdba.EmpiricalQuantileMapping.train(\\n ref_n, hist_n, nquantiles=15, group=\\\"time.month\\\", kind=\\\"+\\\"\\n)\\nscen_detrended = QM.adjust(sim_detrended, extrapolation=\\\"constant\\\", interp=\\\"nearest\\\")\\nscen = sim_fit.retrend(scen_detrended)\\n\\n\\nref.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Reference\\\")\\nsim.groupby(\\\"time.dayofyear\\\").mean().plot(label=\\\"Model - biased\\\")\\nscen.sel(time=slice(\\\"2000\\\", \\\"2015\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2000-15\\\", linestyle=\\\"--\\\"\\n)\\nscen.sel(time=slice(\\\"2015\\\", \\\"2030\\\")).groupby(\\\"time.dayofyear\\\").mean().plot(\\n label=\\\"Model - adjusted - 2015-30\\\", linestyle=\\\"--\\\"\\n)\\nplt.legend()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Third example : Multi-method protocol - Hnilica et al. 2017\\nIn [their paper of 2017](https://doi.org/10.1002/joc.4890), Hnilica, Hanel and Puš present a bias-adjustment method based on the principles of Principal Components Analysis. The idea is simple : use principal components to define coordinates on the reference and on the simulation and then transform the simulation data from the latter to the former. 
Spatial correlation can thus be conserved by taking different points as the dimensions of the transform space. The method was demonstrated in the article by bias-adjusting precipitation over different drainage basins.\\n\\nThe same method could be used for multivariate adjustment. The principle would be the same, concatening the different variables into a single dataset along a new dimension. An example is given in the [advanced notebook](sdba-advanced.ipynb).\\n\\nHere we show how the modularity of `xclim.sdba` can be used to construct a quite complex adjustment protocol involving two adjustment methods : quantile mapping and principal components. Evidently, as this example uses only 2 years of data, it is not complete. It is meant to show how the adjustment functions and how the API can be used.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# We are using xarray's \\\"air_temperature\\\" dataset\\nds = xr.tutorial.open_dataset(\\\"air_temperature\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"# To get an exagerated example we select different points\\n# here \\\"lon\\\" will be our dimension of two \\\"spatially correlated\\\" points\\nreft = ds.air.isel(lat=21, lon=[40, 52]).drop_vars([\\\"lon\\\", \\\"lat\\\"])\\nsimt = ds.air.isel(lat=18, lon=[17, 35]).drop_vars([\\\"lon\\\", \\\"lat\\\"])\\n\\n# Principal Components Adj, no grouping and use \\\"lon\\\" as the space dimensions\\nPCA = sdba.PrincipalComponents.train(reft, simt, group=\\\"time\\\", crd_dim=\\\"lon\\\")\\nscen1 = PCA.adjust(simt)\\n\\n# QM, no grouping, 20 quantiles and additive adjustment\\nEQM = sdba.EmpiricalQuantileMapping.train(\\n reft, scen1, group=\\\"time\\\", nquantiles=50, kind=\\\"+\\\"\\n)\\nscen2 = EQM.adjust(scen1)\",\n \"_____no_output_____\"\n ],\n [\n \"# some Analysis figures\\nfig = plt.figure(figsize=(12, 16))\\ngs = plt.matplotlib.gridspec.GridSpec(3, 2, fig)\\n\\naxPCA = plt.subplot(gs[0, :])\\naxPCA.scatter(reft.isel(lon=0), reft.isel(lon=1), s=20, label=\\\"Reference\\\")\\naxPCA.scatter(simt.isel(lon=0), simt.isel(lon=1), s=10, label=\\\"Simulation\\\")\\naxPCA.scatter(scen2.isel(lon=0), scen2.isel(lon=1), s=3, label=\\\"Adjusted - PCA+EQM\\\")\\naxPCA.set_xlabel(\\\"Point 1\\\")\\naxPCA.set_ylabel(\\\"Point 2\\\")\\naxPCA.set_title(\\\"PC-space\\\")\\naxPCA.legend()\\n\\nrefQ = reft.quantile(EQM.ds.quantiles, dim=\\\"time\\\")\\nsimQ = simt.quantile(EQM.ds.quantiles, dim=\\\"time\\\")\\nscen1Q = scen1.quantile(EQM.ds.quantiles, dim=\\\"time\\\")\\nscen2Q = scen2.quantile(EQM.ds.quantiles, dim=\\\"time\\\")\\nfor i in range(2):\\n if i == 0:\\n axQM = plt.subplot(gs[1, 0])\\n else:\\n axQM = plt.subplot(gs[1, 1], sharey=axQM)\\n axQM.plot(refQ.isel(lon=i), simQ.isel(lon=i), label=\\\"No adj\\\")\\n axQM.plot(refQ.isel(lon=i), scen1Q.isel(lon=i), label=\\\"PCA\\\")\\n axQM.plot(refQ.isel(lon=i), scen2Q.isel(lon=i), label=\\\"PCA+EQM\\\")\\n axQM.plot(\\n refQ.isel(lon=i), refQ.isel(lon=i), color=\\\"k\\\", linestyle=\\\":\\\", label=\\\"Ideal\\\"\\n )\\n axQM.set_title(f\\\"QQ plot - Point {i + 1}\\\")\\n axQM.set_xlabel(\\\"Reference\\\")\\n axQM.set_xlabel(\\\"Model\\\")\\n axQM.legend()\\n\\naxT = plt.subplot(gs[2, :])\\nreft.isel(lon=0).plot(ax=axT, label=\\\"Reference\\\")\\nsimt.isel(lon=0).plot(ax=axT, label=\\\"Unadjusted sim\\\")\\n# scen1.isel(lon=0).plot(ax=axT, label='PCA only')\\nscen2.isel(lon=0).plot(ax=axT, label=\\\"PCA+EQM\\\")\\naxT.legend()\\naxT.set_title(\\\"Timeseries - Point 1\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Fourth example : Multivariate 
bias-adjustment with multiple steps - Cannon 2018\\n\\nThis section replicates the \\\"MBCn\\\" algorithm described by [Cannon (2018)](https://doi.org/10.1007/s00382-017-3580-6). The method relies on some univariate algorithm, an adaption of the N-pdf transform of [Pitié et al. (2005)](https://ieeexplore.ieee.org/document/1544887/) and a final reordering step.\\n\\nIn the following, we use the AHCCD and CanESM2 data are reference and simulation and we correct both `pr` and `tasmax` together.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from xclim.core.units import convert_units_to\\nfrom xclim.testing import open_dataset\\n\\ndref = open_dataset(\\n \\\"sdba/ahccd_1950-2013.nc\\\", chunks={\\\"location\\\": 1}, drop_variables=[\\\"lat\\\", \\\"lon\\\"]\\n).sel(time=slice(\\\"1981\\\", \\\"2010\\\"))\\ndref = dref.assign(\\n tasmax=convert_units_to(dref.tasmax, \\\"K\\\"),\\n pr=convert_units_to(dref.pr, \\\"kg m-2 s-1\\\"),\\n)\\ndsim = open_dataset(\\n \\\"sdba/CanESM2_1950-2100.nc\\\", chunks={\\\"location\\\": 1}, drop_variables=[\\\"lat\\\", \\\"lon\\\"]\\n)\\n\\ndhist = dsim.sel(time=slice(\\\"1981\\\", \\\"2010\\\"))\\ndsim = dsim.sel(time=slice(\\\"2041\\\", \\\"2070\\\"))\\ndref\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"##### Perform an initial univariate adjustment.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# additive for tasmax\\nQDMtx = sdba.QuantileDeltaMapping.train(\\n dref.tasmax, dhist.tasmax, nquantiles=20, kind=\\\"+\\\", group=\\\"time\\\"\\n)\\n# Adjust both hist and sim, we'll feed both to the Npdf transform.\\nscenh_tx = QDMtx.adjust(dhist.tasmax)\\nscens_tx = QDMtx.adjust(dsim.tasmax)\\n\\n# remove == 0 values in pr:\\ndref[\\\"pr\\\"] = sdba.processing.jitter_under_thresh(dref.pr, \\\"0.01 mm d-1\\\")\\ndhist[\\\"pr\\\"] = sdba.processing.jitter_under_thresh(dhist.pr, \\\"0.01 mm d-1\\\")\\ndsim[\\\"pr\\\"] = sdba.processing.jitter_under_thresh(dsim.pr, \\\"0.01 mm d-1\\\")\\n\\n# multiplicative for pr\\nQDMpr = sdba.QuantileDeltaMapping.train(\\n dref.pr, dhist.pr, nquantiles=20, kind=\\\"*\\\", group=\\\"time\\\"\\n)\\n# Adjust both hist and sim, we'll feed both to the Npdf transform.\\nscenh_pr = QDMpr.adjust(dhist.pr)\\nscens_pr = QDMpr.adjust(dsim.pr)\\n\\nscenh = xr.Dataset(dict(tasmax=scenh_tx, pr=scenh_pr))\\nscens = xr.Dataset(dict(tasmax=scens_tx, pr=scens_pr))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"##### Stack the variables to multivariate arrays and standardize them\\nThe standardization process ensure the mean and standard deviation of each column (variable) is 0 and 1 respectively.\\n\\n`hist` and `sim` are standardized together so the two series are coherent. We keep the mean and standard deviation to be reused when we build the result.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Stack the variables (tasmax and pr)\\nref = sdba.processing.stack_variables(dref)\\nscenh = sdba.processing.stack_variables(scenh)\\nscens = sdba.processing.stack_variables(scens)\\n\\n# Standardize\\nref, _, _ = sdba.processing.standardize(ref)\\n\\nallsim, savg, sstd = sdba.processing.standardize(xr.concat((scenh, scens), \\\"time\\\"))\\nhist = allsim.sel(time=scenh.time)\\nsim = allsim.sel(time=scens.time)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"##### Perform the N-dimensional probability density function transform\\n\\nThe NpdfTransform will iteratively randomly rotate our arrays in the \\\"variables\\\" space and apply the univariate adjustment before rotating it back. In Cannon (2018) and Pitié et al. 
(2005), it can be seen that the source array's joint distribution converges toward the target's joint distribution when a large number of iterations is done.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from xclim import set_options\\n\\n# See the advanced notebook for details on how this option work\\nwith set_options(sdba_extra_output=True):\\n out = sdba.adjustment.NpdfTransform.adjust(\\n ref,\\n hist,\\n sim,\\n base=sdba.QuantileDeltaMapping, # Use QDM as the univariate adjustment.\\n base_kws={\\\"nquantiles\\\": 20, \\\"group\\\": \\\"time\\\"},\\n n_iter=20, # perform 20 iteration\\n n_escore=1000, # only send 1000 points to the escore metric (it is realy slow)\\n )\\n\\nscenh = out.scenh.rename(time_hist=\\\"time\\\") # Bias-adjusted historical period\\nscens = out.scen # Bias-adjusted future period\\nextra = out.drop_vars([\\\"scenh\\\", \\\"scen\\\"])\\n\\n# Un-standardize (add the mean and the std back)\\nscenh = sdba.processing.unstandardize(scenh, savg, sstd)\\nscens = sdba.processing.unstandardize(scens, savg, sstd)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"##### Restoring the trend\\n\\nThe NpdfT has given us new \\\"hist\\\" and \\\"sim\\\" arrays with a correct rank structure. However, the trend is lost in this process. We reorder the result of the initial adjustment according to the rank structure of the NpdfT outputs to get our final bias-adjusted series.\\n\\n`sdba.processing.reordering` : 'ref' the argument that provides the order, 'sim' is the argument to reorder.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"scenh = sdba.processing.reordering(hist, scenh, group=\\\"time\\\")\\nscens = sdba.processing.reordering(sim, scens, group=\\\"time\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"scenh = sdba.processing.unstack_variables(scenh)\\nscens = sdba.processing.unstack_variables(scens)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"##### There we are!\\n\\nLet's trigger all the computations. Here we write the data to disk and use `compute=False` in order to trigger the whole computation tree only once. 
There seems to be no way in xarray to do the same with a `load` call.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from dask import compute\\nfrom dask.diagnostics import ProgressBar\\n\\ntasks = [\\n scenh.isel(location=2).to_netcdf(\\\"mbcn_scen_hist_loc2.nc\\\", compute=False),\\n scens.isel(location=2).to_netcdf(\\\"mbcn_scen_sim_loc2.nc\\\", compute=False),\\n extra.escores.isel(location=2)\\n .to_dataset()\\n .to_netcdf(\\\"mbcn_escores_loc2.nc\\\", compute=False),\\n]\\n\\nwith ProgressBar():\\n compute(tasks)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let's compare the series and look at the distance scores to see how well the Npdf transform has converged.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"scenh = xr.open_dataset(\\\"mbcn_scen_hist_loc2.nc\\\")\\n\\nfig, ax = plt.subplots()\\n\\ndref.isel(location=2).tasmax.plot(ax=ax, label=\\\"Reference\\\")\\nscenh.tasmax.plot(ax=ax, label=\\\"Adjusted\\\", alpha=0.65)\\ndhist.isel(location=2).tasmax.plot(ax=ax, label=\\\"Simulated\\\")\\n\\nax.legend()\",\n \"_____no_output_____\"\n ],\n [\n \"escores = xr.open_dataarray(\\\"mbcn_escores_loc2.nc\\\")\\ndiff_escore = escores.differentiate(\\\"iterations\\\")\\ndiff_escore.plot()\\nplt.title(\\\"Difference of the subsequent e-scores.\\\")\\nplt.ylabel(\\\"E-scores difference\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"diff_escore\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The tutorial continues in the [advanced notebook](sdba-advanced.ipynb) with more on optimization with dask, other fancier detrending algorithms and an example pipeline for heavy processing.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n 
\"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459054,"cells":{"hexsha":{"kind":"string","value":"e7ef261f645ff1e1f53ef5d166ea00aac1cf3a91"},"size":{"kind":"number","value":69132,"string":"69,132"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebooks/Working with PDBsum in Jupyter Basics.ipynb"},"max_stars_repo_name":{"kind":"string","value":"fomightez/pdbsum-binder"},"max_stars_repo_head_hexsha":{"kind":"string","value":"accddd4d17d053694241c1e91d34e9e2aac80b03"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"notebooks/Working with PDBsum in Jupyter Basics.ipynb"},"max_issues_repo_name":{"kind":"string","value":"fomightez/pdbsum-binder"},"max_issues_repo_head_hexsha":{"kind":"string","value":"accddd4d17d053694241c1e91d34e9e2aac80b03"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"notebooks/Working with PDBsum in Jupyter Basics.ipynb"},"max_forks_repo_name":{"kind":"string","value":"fomightez/pdbsum-binder"},"max_forks_repo_head_hexsha":{"kind":"string","value":"accddd4d17d053694241c1e91d34e9e2aac80b03"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-06-23T23:46:41.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-06-23T23:46:41.000Z"},"avg_line_length":{"kind":"number","value":36.7723404255,"string":"36.77234"},"max_line_length":{"kind":"number","value":934,"string":"934"},"alphanum_fraction":{"kind":"number","value":0.4109240294,"string":"0.410924"},"cells":{"kind":"list like","value":[[["# Working with PDBsum in Jupyter & Demonstration of PDBsum protein interface data to dataframe script","_____no_output_____"],["Usually you'll want to get some data from PDBsum and analyze it. For the current example in this series of notebooks, I'll cover how to bring in a file of protein-protein interactions and then progress through using that in combination with Python to analyze the results and ultimately compare the results to a different structure.\n\n-----\n\n
If you haven't used one of these notebooks before, they're basically web pages in which you can write, edit, and run live code. They're meant to encourage experimentation, so don't feel nervous. Just try running a few cells and see what happens!

Some tips:

- Code cells have boxes around them. When you hover over them an icon appears.
- To run a code cell either click the icon, or click on the cell and then hit Shift+Enter. The Shift+Enter combo will also move you to the next cell, so it's a quick way to work through the notebook.
- While a cell is running, a * appears in the square brackets next to the cell. Once the cell has finished running, the asterisk will be replaced with a number.
- In most cases you'll want to start from the top of the notebook and work your way down, running each cell in turn. Later cells might depend on the results of earlier ones.
- To edit a code cell, just click on it and type stuff. Remember to run the cell once you've finished editing.
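If you'd like a zero-stakes first cell to try, the minimal snippet below is one example; it is not part of the original PDBsum walkthrough and assumes nothing beyond a working Python kernel — it just confirms the notebook responds when you run a cell.

```python
# Throwaway test cell (not part of the PDBsum workflow):
# confirm the notebook's Python kernel is responding.
import sys

print("This notebook is running Python", sys.version.split()[0])
```

If a version number prints below the cell, the rest of the notebook should run the same way.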
\n\n----","_____no_output_____"],["### Retrieving Protein-Protein interface reports/ the list of interactions\n\n#### Getting the list of interactions between two proteins under individual entries under PDBsum's 'Prot-prot' tab via the command line.\n\nSay, for example, the entry [here](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6ah3&template=interfaces.html&o=RESIDUE&l=3) links to the following as 'List of\ninteractions' in the bottom right of the page:\n\n```text \nhttp://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetIface.pl?pdb=6ah3&chain1=B&chain2=G\n```\n \nThen, based on the suggestion at the top of [here](https://stackoverflow.com/a/52363117/8508004), that would be used in a curl command where the items after the `?` in the original URL get placed into quotes and provided following the `--data` flag in the call to `curl`, like so:\n```text\ncurl -L -o data.txt --data \"pdb=6ah3&chain1=B&chain2=G\" http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetIface.pl\n```\n\n**Specifically**, the `--data \"pdb=6ah3&chain1=B&chain2=G\"` is the part coming from the end of the original URL.\n\n\nPutting that into action in Jupyter to fetch the interactions list for the example as a text file:","_____no_output_____"]],[["!curl -L -o data.txt --data \"pdb=6ah3&chain1=B&chain2=G\" http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetIface.pl"," % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 7063 0 7037 100 26 9033 33 --:--:-- --:--:-- --:--:-- 9055\n"]],[["To prove that the data file has been retrieved, we'll show the first 16 lines of it by running the next cell:","_____no_output_____"]],[["!head -16 data.txt","
\r\nList of atom-atom interactions across protein-protein interface\r\n---------------------------------------------------------------\r\n

\r\n PDB code: 6ah3 Chains B }{ G\r\n ------------------------------\r\n

\r\n\r\nHydrogen bonds\r\n--------------\r\n\r\n <----- A T O M 1 -----> <----- A T O M 2 ----->\r\n\r\n Atom Atom Res Res Atom Atom Res Res\r\n no. name name no. Chain no. name name no. Chain Distance\r\n 1. 9937 NZ LYS 326 B <--> 20598 O LYS 122 G 2.47\r\n"]],[["Later in this series of notebooks, I'll demonstrate how to make this step even easier with just the PDB entry id and the chains you are interested in, and then later how to loop on this process to get multiple data files for interactions from different structures.","_____no_output_____"],["### Making a Pandas dataframe from the interactions file\n\nTo convert the data to a dataframe, we'll use a script.\n\n If you haven't encountered Pandas dataframes before, I suggest you see the first two notebooks that come up when you launch a session from my [blast-binder](https://github.com/fomightez/blast-binder) site. Those first two notebooks cover some ways of using the dataframe containing BLAST results. \n \nTo get that script, you can run the next cell. (It is not included in the repository where this launches from, to ensure you always get the most current version, which is assumed to be the best available at the time.)","_____no_output_____"]],[["!curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/pdbsum_prot_interactions_list_to_df.py"," % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 23915 100 23915 0 0 35272 0 --:--:-- --:--:-- --:--:-- 35220\n"]],[["We have the script now. And we already have a data file for it to process. To process the data file, run the next command, where we use Python to run the script and direct it at the results file, `data.txt`, we made just a few cells ago.","_____no_output_____"]],[["%run pdbsum_prot_interactions_list_to_df.py data.txt","Provided interactions data read and converted to a dataframe...\n\nA dataframe of the data has been saved as a file\nin a manner where other Python programs can access it (pickled form).\nRESULTING DATAFRAME is stored as ==> 'prot_int_pickled_df.pkl'"]],[["As of writing this, the script we are using outputs a file that is a binary, compact form of the dataframe. (That means it is tiny and not human readable. It is called 'pickled'. Saving in that form may seem odd, but as illustrated [here](#Output-to-more-universal,-table-like-formats) below, this is a very malleable form. And even more pertinent for dealing with data in Jupyter notebooks, there is actually an easier way to interact with this script when in a Jupyter notebook that skips saving this intermediate file. So hang on through the long, more traditional way of doing this before the easier way is introduced. And I saved it in the compact form and not the more typical tab-delimited form because we mostly won't go this route and might as well make tiny files while working along to a better route. It is easy to convert back and forth using the pickled form assuming you can match the Pandas/Python versions.)\n\nWe can take that file where the dataframe is pickled, and bring it into active memory in this notebook with another command from the Pandas library. First, we have to import the Pandas library.\nRun the next command to bring the dataframe into active memory. 
Note the name comes from the name noted when we ran the script in the cell above.","_____no_output_____"]],[["import pandas as pd\ndf = pd.read_pickle(\"prot_int_pickled_df.pkl\")","_____no_output_____"]],[["When that last cell ran, you won't notice any output, but something happened. We can look at that dataframe by calling it in a cell.","_____no_output_____"]],[["df","_____no_output_____"]],[["You'll notice that if the list of data is large, that the Jupyter environment represents just the head and tail to make it more reasonable. There are ways you can have Jupyter display it all which we won't go into here. \n\nInstead we'll start to show some methods of dataframes that make them convenient. For example, you can use the `head` method to see the start like we used on the command line above.","_____no_output_____"]],[["df.head()","_____no_output_____"]],[["Now what types of interactions are observed for this pair of interacting protein chains?\n\nTo help answer that, we can group the results by the type column.","_____no_output_____"]],[["grouped = df.groupby('type')\nfor type, grouped_df in grouped:\n print(type)\n display(grouped_df)","Hydrogen bonds\n"]],[["Same data as earlier but we can cleary see we have Hydrogen bonds, Non-bonded contacts (a.k.a., van der Waals contacts), and salt bridges, and we immediately get a sense of what types of interactions are more abundant.\n\nYou may want to get a sense of what else you can do by examining he first two notebooks that come up with you launch a session from my [blast-binder](https://github.com/fomightez/blast-binder) site. Those first two notebooks cover using the dataframe containing BLAST results some.\n\nShortly, we'll cover how to bring the dataframe we just made into the notebook without dealing with a file intermediate; however, next I'll demonstrate how to save it as text for use elsewhere, such as in Excel.","_____no_output_____"],["## Output to more universal, table-like formats\n\nI've tried to sell you on the power of the Python/Pandas dataframe, but it isn't for all uses or everyone. However, most everyone is accustomed to dealing with text based tables or even Excel. In fact, a text-based based table perhaps tab or comma-delimited would be the better way to archive the data we are generating here. Python/Pandas makes it easy to go from the dataframe form to these tabular forms. You can even go back later from the table to the dataframe, which may be inportant if you are going to different versions of Python/Pandas as I briefly mentioned parenthetically above.\n\n**First, generating a text-based table.**","_____no_output_____"]],[["#Save / write a TSV-formatted (tab-separated values/ tab-delimited) file\ndf.to_csv('pdbsum_data.tsv', sep='\\t',index = False) #add `,header=False` to leave off header, too","_____no_output_____"]],[["Because `df.to_csv()` defaults to dealing with csv, you can simply use `df.to_csv('example.csv',index = False)` for comma-delimited (comma-separated) files.\n\nYou can see that worked by looking at the first few lines with the next command. (Feel free to make the number higher or delete the number all together. 
I restricted it just to first line to make output smaller.)","_____no_output_____"]],[["!head -5 pdbsum_data.tsv","Atom1 no.\tAtom1 name\tAtom1 Res name\tAtom1 Res no.\tAtom1 Chain\tAtom2 no.\tAtom2 name\tAtom2 Res name\tAtom2 Res no.\tAtom2 Chain\tDistance\ttype\r\n9937\tNZ\tLYS\t326\tB\t20598\tO\tLYS\t122\tG\t2.47\tHydrogen bonds\r\n9591\tO\tCYS\t280\tB\t19928\tCG1\tILE\t29\tG\t3.77\tNon-bonded contacts\r\n9591\tO\tCYS\t280\tB\t19930\tCD1\tILE\t29\tG\t3.42\tNon-bonded contacts\r\n9593\tSG\tCYS\t280\tB\t19872\tNZ\tLYS\t22\tG\t3.81\tNon-bonded contacts\r\n"]],[["If you had need to go back from a tab-separated table to a dataframe, you can run something like in the following cell.","_____no_output_____"]],[["reverted_df = pd.read_csv('pdbsum_data.tsv', sep='\\t')\nreverted_df.to_pickle('reverted_df.pkl') # OPTIONAL: pickle that data too","_____no_output_____"]],[["For a comma-delimited (CSV) file you'd use `df = pd.read_csv('example.csv')` because `pd.read_csv()` method defaults to comma as the separator (`sep` parameter).\n\nYou can verify that read from the text-based table by viewing it with the next line.","_____no_output_____"]],[["reverted_df.head()","_____no_output_____"]],[["**Generating an Excel spreadsheet from a dataframe.**\n\nBecause this is an specialized need, there is a special module needed that I didn't bother installing by default and so it needs to be installed before generating the Excel file. Running the next cell will do both.","_____no_output_____"]],[["%pip install openpyxl\n# save to excel (KEEPS multiINDEX, and makes sparse to look good in Excel straight out of Python)\ndf.to_excel('pdbsum_data.xlsx') # after openpyxl installed","Requirement already satisfied: openpyxl in /srv/conda/envs/notebook/lib/python3.7/site-packages (3.0.6)\nRequirement already satisfied: et-xmlfile in /srv/conda/envs/notebook/lib/python3.7/site-packages (from openpyxl) (1.0.1)\nRequirement already satisfied: jdcal in /srv/conda/envs/notebook/lib/python3.7/site-packages (from openpyxl) (1.4.1)\nNote: you may need to restart the kernel to use updated packages.\n"]],[["You'll need to download the file first to your computer and then view it locally as there is no viewer in the Jupyter environment.\n\nAdiitionally, it is possible to add styles to dataframes and the styles such as shading of cells and coloring of text will be translated to the Excel document made as well.\n\nExcel files can be read in to Pandas dataframes directly without needing to go to a text based intermediate first.","_____no_output_____"]],[["# read Excel\ndf_from_excel = pd.read_excel('pdbsum_data.xlsx',engine='openpyxl') # see https://stackoverflow.com/a/65266270/8508004 where notes xlrd no longer supports xlsx","Collecting xlrd\n Downloading xlrd-2.0.1-py2.py3-none-any.whl (96 kB)\n\u001b[K |████████████████████████████████| 96 kB 2.8 MB/s eta 0:00:011\n\u001b[?25hInstalling collected packages: xlrd\nSuccessfully installed xlrd-2.0.1\nNote: you may need to restart the kernel to use updated packages.\n"]],[["That can be viewed to convince yourself it worked by running the next command.","_____no_output_____"]],[["df_from_excel.head()","_____no_output_____"]],[["Next, we'll cover how to bring the dataframe we just made into the notebook without dealing with a file intermediate.\n\n----\n\n### Making a Pandas dataframe from the interactions file directly in Jupyter\n\nFirst we'll check for the script we'll use and get it if we don't already have it. 
\n\n(The thinking is once you know what you are doing you may have skipped all the steps above and not have the script you'll need yet. It cannot hurt to check and if it isn't present, bring it here.)","_____no_output_____"]],[["# Get a file if not yet retrieved / check if file exists\nimport os\nfile_needed = \"pdbsum_prot_interactions_list_to_df.py\"\nif not os.path.isfile(file_needed):\n !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}","_____no_output_____"]],[["This is going to rely on approaches very similar to those illustrated [here](https://github.com/fomightez/patmatch-binder/blob/6f7630b2ee061079a72cd117127328fd1abfa6c7/notebooks/PatMatch%20with%20more%20Python.ipynb#Passing-results-data-into-active-memory-without-a-file-intermediate) and [here](https://github.com/fomightez/patmatch-binder/blob/6f7630b2ee061079a72cd117127328fd1abfa6c7/notebooks/Sending%20PatMatch%20output%20directly%20to%20Python.ipynb##Running-Patmatch-and-passing-the-results-to-Python-without-creating-an-output-file-intermediate).\n\nWe obtained the `pdbsum_prot_interactions_list_to_df.py` script in the preparation steps above. However, instead of using it as an external script as we did earlier in this notebook, we want to use the core function of that script within this notebook for the options that involve no pickled-object file intermediate. Similar to the way we imported a lot of other useful modules in the first notebook and a cell above, you can run the next cell to bring in to memory of this notebook's computational environment, the main function associated with the `pdbsum_prot_interactions_list_to_df.py` script, aptly named `pdbsum_prot_interactions_list_to_df`. (As written below the command to do that looks a bit redundant;however, the first from part of the command below actually is referencing the `pdbsum_prot_interactions_list_to_df.py` script, but it doesn't need the `.py` extension because the import only deals with such files.)","_____no_output_____"]],[["from pdbsum_prot_interactions_list_to_df import pdbsum_prot_interactions_list_to_df","_____no_output_____"]],[["We can demonstrate that worked by calling the function.","_____no_output_____"]],[["pdbsum_prot_interactions_list_to_df()","_____no_output_____"]],[["If the module was not imported, you'd see `ModuleNotFoundError: No module named 'pdbsum_prot_interactions_list_to_df'`, but instead you should see it saying it is missing `data_file` to act on because you passed it nothing.\n\nAfter importing the main function of that script into this running notebook, you are ready to demonstrate the approach that doesn't require a file intermediates. The imported `pdbsum_prot_interactions_list_to_df` function is used within the computational environment of the notebook and the dataframe produced assigned to a variable in the running the notebook. In the end, the results are in an active dataframe in the notebook without needing to read the pickled dataframe. 
**Although bear in mind the pickled dataframe still gets made, and it is good to download and keep that pickled dataframe since you'll find it convenient for reading and getting back into an analysis without need for rerunning earlier steps again.**","_____no_output_____"]],[["direct_df = pdbsum_prot_interactions_list_to_df(\"data.txt\")\ndirect_df.head()","Provided interactions data read and converted to a dataframe...\n\nA dataframe of the data has been saved as a file\nin a manner where other Python programs can access it (pickled form).\nRESULTING DATAFRAME is stored as ==> 'prot_int_pickled_df.pkl'\n\nReturning a dataframe with the information as well."]],[["This may be how you prefer to use the script. Either option exists.\n\n----\n\nContinue on with the next notebook in the series, [Using PDBsum data to highlight changes in protein-protein interactions](Using%20PDBsum%20data%20to%20highlight%20changes%20in%20protein-protein%20interactions.ipynb). That notebook builds on the ground work here to demonstrate how to examine similarities and differences in specific residue-level interactions between the same chains in different, related structures.\n\n----","_____no_output_____"]]],"string":"[\n [\n [\n \"# Working with PDBsum in Jupyter & Demonstration of PDBsum protein interface data to dataframe script\",\n \"_____no_output_____\"\n ],\n [\n \"Usually you'll want to get some data from PDBsum and analyze it. For the current example in this series of notebooks, I'll cover how to bring in a file of protein-protein interactions and then progress through using that in combination with Python to analyze the results and ultimately compare the results to a different structure.\\n\\n-----\\n\\n
If you haven't used one of these notebooks before, they're basically web pages in which you can write, edit, and run live code. They're meant to encourage experimentation, so don't feel nervous. Just try running a few cells and see what happens!
\\n\\nSome tips:\\n\\n• Code cells have boxes around them. When you hover over them, an icon appears.\\n• To run a code cell, either click the icon or click on the cell and then hit Shift+Enter. The Shift+Enter combo will also move you to the next cell, so it's a quick way to work through the notebook.\\n• While a cell is running, a * appears in the square brackets next to the cell. Once the cell has finished running, the asterisk will be replaced with a number.\\n• In most cases you'll want to start from the top of the notebook and work your way down, running each cell in turn. Later cells might depend on the results of earlier ones.\\n• To edit a code cell, just click on it and type stuff. Remember to run the cell once you've finished editing.
\\n\\n----\",\n \"_____no_output_____\"\n ],\n [\n \"### Retrieving Protein-Protein interface reports/ the list of interactions\\n\\n#### Getting list of interactions between two proteins under individual entries under PDBsum's 'Prot-prot' tab via command line.\\n\\nSay example from [here](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6ah3&template=interfaces.html&o=RESIDUE&l=3) links to the following as 'List of\\ninteractions' in the bottom right of the page:\\n\\n```text \\nhttp://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetIface.pl?pdb=6ah3&chain1=B&chain2=G\\n```\\n \\nThen based on suggestion at top [here](https://stackoverflow.com/a/52363117/8508004) that would be used in a curl command where the items after the `?` in the original URL get placed into quotes and provided following the `--data` flag argument option in the call to `curl`, like so:\\n```text\\ncurl -L -o data.txt --data \\\"pdb=6ah3&chain1=B&chain2=G\\\" http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetIface.pl\\n```\\n\\n**Specifically**, the `--data \\\"pdb=6ah3&chain1=B&chain2=G\\\"` is the part coming from the end of the original URL.\\n\\n\\nPutting that into action in Jupyter to fetch for the example the interactions list in a text:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!curl -L -o data.txt --data \\\"pdb=6ah3&chain1=B&chain2=G\\\" http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetIface.pl\",\n \" % Total % Received % Xferd Average Speed Time Time Time Current\\n Dload Upload Total Spent Left Speed\\n100 7063 0 7037 100 26 9033 33 --:--:-- --:--:-- --:--:-- 9055\\n\"\n ]\n ],\n [\n [\n \"To prove that the data file has been retieved, we'll show the first 16 lines of it by running the next cell:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!head -16 data.txt\",\n \"
\\r\\nList of atom-atom interactions across protein-protein interface\\r\\n---------------------------------------------------------------\\r\\n
\\r\\n PDB code: 6ah3 Chains B }{ G\\r\\n ------------------------------\\r\\n
\\r\\n\\r\\nHydrogen bonds\\r\\n--------------\\r\\n\\r\\n <----- A T O M 1 -----> <----- A T O M 2 ----->\\r\\n\\r\\n Atom Atom Res Res Atom Atom Res Res\\r\\n no. name name no. Chain no. name name no. Chain Distance\\r\\n 1. 9937 NZ LYS 326 B <--> 20598 O LYS 122 G 2.47\\r\\n\"\n ]\n ],\n [\n [\n \"Later in this series of notebooks, I'll demonstrate how to make this step even easier with just the PDB entry id and the chains you are interested in and the later how to loop on this process to get multiple data files for interactions from different structures.\",\n \"_____no_output_____\"\n ],\n [\n \"### Making a Pandas dataframe from the interactions file\\n\\nTo convert the data to a dataframe, we'll use a script.\\n\\n If you haven't encountered Pandas dataframes before I suggest you see the first two notebooks that come up with you launch a session from my [blast-binder](https://github.com/fomightez/blast-binder) site. Those first two notebooks cover using the dataframe containing BLAST results some. \\n \\nTo get that script, you can run the next cell. (It is not included in the repository where this launches from to insure you always get the most current version, which is assumed to be the best available at the time.)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/pdbsum_prot_interactions_list_to_df.py\",\n \" % Total % Received % Xferd Average Speed Time Time Time Current\\n Dload Upload Total Spent Left Speed\\n100 23915 100 23915 0 0 35272 0 --:--:-- --:--:-- --:--:-- 35220\\n\"\n ]\n ],\n [\n [\n \"We have the script now. And we already have a data file for it to process. To process the data file, run the next command where we use Python to run the script and direct it at the results file, `data.txt`, we made just a few cells ago.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%run pdbsum_prot_interactions_list_to_df.py data.txt\",\n \"Provided interactions data read and converted to a dataframe...\\n\\nA dataframe of the data has been saved as a file\\nin a manner where other Python programs can access it (pickled form).\\nRESULTING DATAFRAME is stored as ==> 'prot_int_pickled_df.pkl'\"\n ]\n ],\n [\n [\n \"As of writing this, the script we are using outputs a file that is a binary, compact form of the dataframe. (That means it is tiny and not human readable. It is called 'pickled'. Saving in that form may seem odd, but as illustrated [here](#Output-to-more-universal,-table-like-formats) below this is is a very malleable form. And even more pertinent for dealing with data in Jupyter notebooks, there is actually an easier way to interact with this script when in Jupyter notebook that skips saving this intermediate file. So hang on through the long, more trandtional way of doing this before the easier way is introduced. And I saved it in the compact form and not the mroe typical tab-delimited form because we mostly won't go this route and might as well make tiny files while working along to a better route. It is easy to convert back and forth using the pickled form assuming you can match the Pandas/Python versions.)\\n\\nWe can take that file where the dataframe is pickled, and bring it into active memory in this notebook with another command form the Pandas library. First, we have to import the Pandas library.\\nRun the next command to bring the dataframe into active memory. 
Note the name comes from the name noted when we ran the script in the cell above.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\\ndf = pd.read_pickle(\\\"prot_int_pickled_df.pkl\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"When that last cell ran, you won't notice any output, but something happened. We can look at that dataframe by calling it in a cell.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"You'll notice that if the list of data is large, that the Jupyter environment represents just the head and tail to make it more reasonable. There are ways you can have Jupyter display it all which we won't go into here. \\n\\nInstead we'll start to show some methods of dataframes that make them convenient. For example, you can use the `head` method to see the start like we used on the command line above.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df.head()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now what types of interactions are observed for this pair of interacting protein chains?\\n\\nTo help answer that, we can group the results by the type column.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"grouped = df.groupby('type')\\nfor type, grouped_df in grouped:\\n print(type)\\n display(grouped_df)\",\n \"Hydrogen bonds\\n\"\n ]\n ],\n [\n [\n \"Same data as earlier but we can cleary see we have Hydrogen bonds, Non-bonded contacts (a.k.a., van der Waals contacts), and salt bridges, and we immediately get a sense of what types of interactions are more abundant.\\n\\nYou may want to get a sense of what else you can do by examining he first two notebooks that come up with you launch a session from my [blast-binder](https://github.com/fomightez/blast-binder) site. Those first two notebooks cover using the dataframe containing BLAST results some.\\n\\nShortly, we'll cover how to bring the dataframe we just made into the notebook without dealing with a file intermediate; however, next I'll demonstrate how to save it as text for use elsewhere, such as in Excel.\",\n \"_____no_output_____\"\n ],\n [\n \"## Output to more universal, table-like formats\\n\\nI've tried to sell you on the power of the Python/Pandas dataframe, but it isn't for all uses or everyone. However, most everyone is accustomed to dealing with text based tables or even Excel. In fact, a text-based based table perhaps tab or comma-delimited would be the better way to archive the data we are generating here. Python/Pandas makes it easy to go from the dataframe form to these tabular forms. You can even go back later from the table to the dataframe, which may be inportant if you are going to different versions of Python/Pandas as I briefly mentioned parenthetically above.\\n\\n**First, generating a text-based table.**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#Save / write a TSV-formatted (tab-separated values/ tab-delimited) file\\ndf.to_csv('pdbsum_data.tsv', sep='\\\\t',index = False) #add `,header=False` to leave off header, too\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Because `df.to_csv()` defaults to dealing with csv, you can simply use `df.to_csv('example.csv',index = False)` for comma-delimited (comma-separated) files.\\n\\nYou can see that worked by looking at the first few lines with the next command. (Feel free to make the number higher or delete the number all together. 
I restricted it just to first line to make output smaller.)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!head -5 pdbsum_data.tsv\",\n \"Atom1 no.\\tAtom1 name\\tAtom1 Res name\\tAtom1 Res no.\\tAtom1 Chain\\tAtom2 no.\\tAtom2 name\\tAtom2 Res name\\tAtom2 Res no.\\tAtom2 Chain\\tDistance\\ttype\\r\\n9937\\tNZ\\tLYS\\t326\\tB\\t20598\\tO\\tLYS\\t122\\tG\\t2.47\\tHydrogen bonds\\r\\n9591\\tO\\tCYS\\t280\\tB\\t19928\\tCG1\\tILE\\t29\\tG\\t3.77\\tNon-bonded contacts\\r\\n9591\\tO\\tCYS\\t280\\tB\\t19930\\tCD1\\tILE\\t29\\tG\\t3.42\\tNon-bonded contacts\\r\\n9593\\tSG\\tCYS\\t280\\tB\\t19872\\tNZ\\tLYS\\t22\\tG\\t3.81\\tNon-bonded contacts\\r\\n\"\n ]\n ],\n [\n [\n \"If you had need to go back from a tab-separated table to a dataframe, you can run something like in the following cell.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"reverted_df = pd.read_csv('pdbsum_data.tsv', sep='\\\\t')\\nreverted_df.to_pickle('reverted_df.pkl') # OPTIONAL: pickle that data too\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"For a comma-delimited (CSV) file you'd use `df = pd.read_csv('example.csv')` because `pd.read_csv()` method defaults to comma as the separator (`sep` parameter).\\n\\nYou can verify that read from the text-based table by viewing it with the next line.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"reverted_df.head()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Generating an Excel spreadsheet from a dataframe.**\\n\\nBecause this is an specialized need, there is a special module needed that I didn't bother installing by default and so it needs to be installed before generating the Excel file. Running the next cell will do both.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%pip install openpyxl\\n# save to excel (KEEPS multiINDEX, and makes sparse to look good in Excel straight out of Python)\\ndf.to_excel('pdbsum_data.xlsx') # after openpyxl installed\",\n \"Requirement already satisfied: openpyxl in /srv/conda/envs/notebook/lib/python3.7/site-packages (3.0.6)\\nRequirement already satisfied: et-xmlfile in /srv/conda/envs/notebook/lib/python3.7/site-packages (from openpyxl) (1.0.1)\\nRequirement already satisfied: jdcal in /srv/conda/envs/notebook/lib/python3.7/site-packages (from openpyxl) (1.4.1)\\nNote: you may need to restart the kernel to use updated packages.\\n\"\n ]\n ],\n [\n [\n \"You'll need to download the file first to your computer and then view it locally as there is no viewer in the Jupyter environment.\\n\\nAdiitionally, it is possible to add styles to dataframes and the styles such as shading of cells and coloring of text will be translated to the Excel document made as well.\\n\\nExcel files can be read in to Pandas dataframes directly without needing to go to a text based intermediate first.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# read Excel\\ndf_from_excel = pd.read_excel('pdbsum_data.xlsx',engine='openpyxl') # see https://stackoverflow.com/a/65266270/8508004 where notes xlrd no longer supports xlsx\",\n \"Collecting xlrd\\n Downloading xlrd-2.0.1-py2.py3-none-any.whl (96 kB)\\n\\u001b[K |████████████████████████████████| 96 kB 2.8 MB/s eta 0:00:011\\n\\u001b[?25hInstalling collected packages: xlrd\\nSuccessfully installed xlrd-2.0.1\\nNote: you may need to restart the kernel to use updated packages.\\n\"\n ]\n ],\n [\n [\n \"That can be viewed to convince yourself it worked by running the next command.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df_from_excel.head()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Next, 
we'll cover how to bring the dataframe we just made into the notebook without dealing with a file intermediate.\\n\\n----\\n\\n### Making a Pandas dataframe from the interactions file directly in Jupyter\\n\\nFirst we'll check for the script we'll use and get it if we don't already have it. \\n\\n(The thinking is once you know what you are doing you may have skipped all the steps above and not have the script you'll need yet. It cannot hurt to check and if it isn't present, bring it here.)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Get a file if not yet retrieved / check if file exists\\nimport os\\nfile_needed = \\\"pdbsum_prot_interactions_list_to_df.py\\\"\\nif not os.path.isfile(file_needed):\\n !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"This is going to rely on approaches very similar to those illustrated [here](https://github.com/fomightez/patmatch-binder/blob/6f7630b2ee061079a72cd117127328fd1abfa6c7/notebooks/PatMatch%20with%20more%20Python.ipynb#Passing-results-data-into-active-memory-without-a-file-intermediate) and [here](https://github.com/fomightez/patmatch-binder/blob/6f7630b2ee061079a72cd117127328fd1abfa6c7/notebooks/Sending%20PatMatch%20output%20directly%20to%20Python.ipynb##Running-Patmatch-and-passing-the-results-to-Python-without-creating-an-output-file-intermediate).\\n\\nWe obtained the `pdbsum_prot_interactions_list_to_df.py` script in the preparation steps above. However, instead of using it as an external script as we did earlier in this notebook, we want to use the core function of that script within this notebook for the options that involve no pickled-object file intermediate. Similar to the way we imported a lot of other useful modules in the first notebook and a cell above, you can run the next cell to bring in to memory of this notebook's computational environment, the main function associated with the `pdbsum_prot_interactions_list_to_df.py` script, aptly named `pdbsum_prot_interactions_list_to_df`. (As written below the command to do that looks a bit redundant;however, the first from part of the command below actually is referencing the `pdbsum_prot_interactions_list_to_df.py` script, but it doesn't need the `.py` extension because the import only deals with such files.)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from pdbsum_prot_interactions_list_to_df import pdbsum_prot_interactions_list_to_df\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We can demonstrate that worked by calling the function.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"pdbsum_prot_interactions_list_to_df()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"If the module was not imported, you'd see `ModuleNotFoundError: No module named 'pdbsum_prot_interactions_list_to_df'`, but instead you should see it saying it is missing `data_file` to act on because you passed it nothing.\\n\\nAfter importing the main function of that script into this running notebook, you are ready to demonstrate the approach that doesn't require a file intermediates. The imported `pdbsum_prot_interactions_list_to_df` function is used within the computational environment of the notebook and the dataframe produced assigned to a variable in the running the notebook. In the end, the results are in an active dataframe in the notebook without needing to read the pickled dataframe. 
**Although bear in mind the pickled dataframe still gets made, and it is good to download and keep that pickled dataframe since you'll find it convenient for reading and getting back into an analysis without need for rerunning earlier steps again.**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"direct_df = pdbsum_prot_interactions_list_to_df(\\\"data.txt\\\")\\ndirect_df.head()\",\n \"Provided interactions data read and converted to a dataframe...\\n\\nA dataframe of the data has been saved as a file\\nin a manner where other Python programs can access it (pickled form).\\nRESULTING DATAFRAME is stored as ==> 'prot_int_pickled_df.pkl'\\n\\nReturning a dataframe with the information as well.\"\n ]\n ],\n [\n [\n \"This may be how you prefer to use the script. Either option exists.\\n\\n----\\n\\nContinue on with the next notebook in the series, [Using PDBsum data to highlight changes in protein-protein interactions](Using%20PDBsum%20data%20to%20highlight%20changes%20in%20protein-protein%20interactions.ipynb). That notebook builds on the ground work here to demonstrate how to examine similarities and differences in specific residue-level interactions between the same chains in different, related structures.\\n\\n----\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n 
\"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459055,"cells":{"hexsha":{"kind":"string","value":"e7ef2c0cc8bcff19ecfffc4a98022bbbf3122422"},"size":{"kind":"number","value":730,"string":"730"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"sp20/2020-03-26-meeting08/2020-03-26-meeting08.ipynb"},"max_stars_repo_name":{"kind":"string","value":"brandons209/supplementary"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2940da71101d3c4a86002cf2291ec579b699521f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"sp20/2020-03-26-meeting08/2020-03-26-meeting08.ipynb"},"max_issues_repo_name":{"kind":"string","value":"brandons209/supplementary"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2940da71101d3c4a86002cf2291ec579b699521f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"sp20/2020-03-26-meeting08/2020-03-26-meeting08.ipynb"},"max_forks_repo_name":{"kind":"string","value":"brandons209/supplementary"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2940da71101d3c4a86002cf2291ec579b699521f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":20.8571428571,"string":"20.857143"},"max_line_length":{"kind":"number","value":108,"string":"108"},"alphanum_fraction":{"kind":"number","value":0.4575342466,"string":"0.457534"},"cells":{"kind":"list like","value":[[["empty"]]],"string":"[\n [\n [\n \"empty\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["empty"],"string":"[\n \"empty\"\n]"},"cell_type_groups":{"kind":"list like","value":[["empty"]],"string":"[\n [\n \"empty\"\n ]\n]"}}},{"rowIdx":1459056,"cells":{"hexsha":{"kind":"string","value":"e7ef316eda609a111d0f96deff2688681b96c156"},"size":{"kind":"number","value":21019,"string":"21,019"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"titanic_classfication.ipynb"},"max_stars_repo_name":{"kind":"string","value":"jhee-yun/test_machinelearning1"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9787930996b0f44a155a9c8656ec25783ddf42e0"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"titanic_classfication.ipynb"},"max_issues_repo_name":{"kind":"string","value":"jhee-yun/test_machinelearning1"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9787930996b0f44a155a9c8656ec25783ddf42e0"},"max_issues_repo_licenses":{"kind":"list 
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"titanic_classfication.ipynb"},"max_forks_repo_name":{"kind":"string","value":"jhee-yun/test_machinelearning1"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9787930996b0f44a155a9c8656ec25783ddf42e0"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":31.8952959029,"string":"31.895296"},"max_line_length":{"kind":"number","value":1323,"string":"1,323"},"alphanum_fraction":{"kind":"number","value":0.4219039916,"string":"0.421904"},"cells":{"kind":"list like","value":[[["# !python -m pip install seaborn","_____no_output_____"],["# %load_ext autoreload\n# %autoreload 2","_____no_output_____"],["import seaborn as sns","_____no_output_____"],["df = sns.load_dataset('titanic')\ndf.shape","_____no_output_____"],["df.info()","\nRangeIndex: 891 entries, 0 to 890\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 survived 891 non-null int64 \n 1 pclass 891 non-null int64 \n 2 sex 891 non-null object \n 3 age 714 non-null float64 \n 4 sibsp 891 non-null int64 \n 5 parch 891 non-null int64 \n 6 fare 891 non-null float64 \n 7 embarked 889 non-null object \n 8 class 891 non-null category\n 9 who 891 non-null object \n 10 adult_male 891 non-null bool \n 11 deck 203 non-null category\n 12 embark_town 889 non-null object \n 13 alive 891 non-null object \n 14 alone 891 non-null bool \ndtypes: bool(2), category(2), float64(2), int64(4), object(5)\nmemory usage: 80.6+ KB\n"]],[["survived, pclass, sibsp, parch, fare","_____no_output_____"]],[["X = df[['pclass', 'sibsp', 'parch', 'fare']]\nY = df[['survived']]\nX.shape, Y.shape","_____no_output_____"],["from sklearn.model_selection import train_test_split","_____no_output_____"],["x_train, x_test, y_train, y_test = train_test_split(X, Y)\nx_train.shape, x_test.shape, y_train.shape, y_test.shape","_____no_output_____"],["from sklearn.linear_model import LogisticRegression","_____no_output_____"],["logR = LogisticRegression()\ntype(logR)","_____no_output_____"],["logR.fit(x_train, y_train)","/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py:760: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n"],["logR.classes_","_____no_output_____"],["logR.coef_\n# 'pclass', 'sibsp', 'parch', 'fare'","_____no_output_____"],["logR.score(x_train, y_train)","_____no_output_____"],["logR.predict(x_train)","_____no_output_____"],["logR.predict_proba(x_train)","_____no_output_____"],["logR.predict_proba(x_train[10:13])","_____no_output_____"],["0.41873577+0.58126423","_____no_output_____"],["logR.predict(x_train[10:13])","_____no_output_____"],["print('Hello')","Hello\n"],["from sklearn import metrics","_____no_output_____"],["metrics.confusion_matrix(x_train, y_train)","_____no_output_____"],["","_____no_output_____"]]],"string":"[\n [\n [\n \"# !python -m pip install seaborn\",\n \"_____no_output_____\"\n ],\n [\n \"# %load_ext autoreload\\n# %autoreload 2\",\n \"_____no_output_____\"\n ],\n [\n \"import seaborn as sns\",\n \"_____no_output_____\"\n ],\n [\n \"df = sns.load_dataset('titanic')\\ndf.shape\",\n \"_____no_output_____\"\n ],\n [\n \"df.info()\",\n \"\\nRangeIndex: 891 entries, 0 to 890\\nData columns (total 15 columns):\\n # Column Non-Null Count Dtype \\n--- ------ -------------- ----- \\n 0 survived 891 non-null int64 \\n 1 pclass 891 non-null int64 \\n 2 sex 891 non-null object \\n 3 age 714 non-null float64 \\n 4 sibsp 891 non-null int64 \\n 5 parch 891 non-null int64 \\n 6 fare 891 non-null float64 \\n 7 embarked 889 non-null object \\n 8 class 891 non-null category\\n 9 who 891 non-null object \\n 10 adult_male 891 non-null bool \\n 11 deck 203 non-null category\\n 12 embark_town 889 non-null object \\n 13 alive 891 non-null object \\n 14 alone 891 non-null bool \\ndtypes: bool(2), category(2), float64(2), int64(4), object(5)\\nmemory usage: 80.6+ KB\\n\"\n ]\n ],\n [\n [\n \"survived, pclass, sibsp, parch, fare\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"X = df[['pclass', 'sibsp', 'parch', 'fare']]\\nY = df[['survived']]\\nX.shape, Y.shape\",\n \"_____no_output_____\"\n ],\n [\n \"from sklearn.model_selection import train_test_split\",\n \"_____no_output_____\"\n ],\n [\n \"x_train, x_test, y_train, y_test = train_test_split(X, Y)\\nx_train.shape, x_test.shape, y_train.shape, y_test.shape\",\n \"_____no_output_____\"\n ],\n [\n \"from sklearn.linear_model import LogisticRegression\",\n \"_____no_output_____\"\n ],\n [\n \"logR = LogisticRegression()\\ntype(logR)\",\n \"_____no_output_____\"\n ],\n [\n \"logR.fit(x_train, y_train)\",\n \"/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py:760: DataConversionWarning: A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples, ), for example using ravel().\\n y = column_or_1d(y, warn=True)\\n\"\n ],\n [\n \"logR.classes_\",\n \"_____no_output_____\"\n ],\n [\n \"logR.coef_\\n# 'pclass', 'sibsp', 'parch', 'fare'\",\n \"_____no_output_____\"\n ],\n [\n \"logR.score(x_train, y_train)\",\n \"_____no_output_____\"\n ],\n [\n \"logR.predict(x_train)\",\n \"_____no_output_____\"\n ],\n [\n \"logR.predict_proba(x_train)\",\n \"_____no_output_____\"\n ],\n [\n \"logR.predict_proba(x_train[10:13])\",\n \"_____no_output_____\"\n ],\n [\n \"0.41873577+0.58126423\",\n \"_____no_output_____\"\n ],\n [\n \"logR.predict(x_train[10:13])\",\n \"_____no_output_____\"\n ],\n [\n \"print('Hello')\",\n \"Hello\\n\"\n ],\n [\n \"from sklearn import metrics\",\n \"_____no_output_____\"\n ],\n [\n \"metrics.confusion_matrix(x_train, y_train)\",\n \"_____no_output_____\"\n ],\n [\n \"\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code"],"string":"[\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459057,"cells":{"hexsha":{"kind":"string","value":"e7ef4b6efc793e4f7cefbf352056e7fffce4040e"},"size":{"kind":"number","value":249512,"string":"249,512"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"docs/tutorials/Read_seqfish.ipynb"},"max_stars_repo_name":{"kind":"string","value":"duypham2108/dev_st"},"max_stars_repo_head_hexsha":{"kind":"string","value":"47adcfa5803eba7549b1185ec69d2317b386d9ff"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"docs/tutorials/Read_seqfish.ipynb"},"max_issues_repo_name":{"kind":"string","value":"duypham2108/dev_st"},"max_issues_repo_head_hexsha":{"kind":"string","value":"47adcfa5803eba7549b1185ec69d2317b386d9ff"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"docs/tutorials/Read_seqfish.ipynb"},"max_forks_repo_name":{"kind":"string","value":"duypham2108/dev_st"},"max_forks_repo_head_hexsha":{"kind":"string","value":"47adcfa5803eba7549b1185ec69d2317b386d9ff"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-12-12T12:46:55.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-12-12T12:46:55.000Z"},"avg_line_length":{"kind":"number","value":967.1007751938,"string":"967.100775"},"max_line_length":{"kind":"number","value":97408,"string":"97,408"},"alphanum_fraction":{"kind":"number","value":0.9569880407,"string":"0.956988"},"cells":{"kind":"list like","value":[[["# Working with SeqFish data","_____no_output_____"]],[["import stlearn as st","_____no_output_____"]],[["The data is downloaded from https://www.spatialomics.org/SpatialDB/download.php\n\n| Technique | PMID | Title | Expression | SV genes|\n| ----------- | ----------- | ----------- | ----------- | ----------- |\n|seqFISH|30911168|Transcriptome-scale super-resolved imaging in tissues by RNA seqFISH+\tseqfish_30911168.tar.gz|seqfish_30911168_SVG.tar.gz\n\nRead SeqFish data and we select field 5.","_____no_output_____"]],[["data = st.ReadSeqFish(count_matrix_file=\"../Downloads/seqfish_30911168/cortex_svz_counts.matrix\",\n spatial_file=\"../Downloads/seqfish_30911168/cortex_svz_cellcentroids.csv\",\n field=5)","D:\\Anaconda3\\envs\\test2\\lib\\site-packages\\anndata-0.7.3-py3.8.egg\\anndata\\_core\\anndata.py:119: ImplicitModificationWarning: Transforming to str index.\n warnings.warn(\"Transforming to str index.\", ImplicitModificationWarning)\n"]],[["Quality checking for the data","_____no_output_____"]],[["st.pl.QC_plot(data)","_____no_output_____"]],[["Plot gene Nr4a1","_____no_output_____"]],[["st.pl.gene_plot(data,genes=\"Nr4a1\")","_____no_output_____"]],[["Running Preprocessing for MERFISH data","_____no_output_____"]],[["st.pp.filter_genes(data,min_cells=3)\nst.pp.normalize_total(data)\nst.pp.log1p(data)\nst.pp.scale(data)","Normalization step is finished in adata.X\nLog transformation step is finished in adata.X\nScale step is finished in adata.X\n"]],[["Running PCA to reduce the dimensions to 50","_____no_output_____"]],[["st.em.run_pca(data,n_comps=50,random_state=0)","PCA is done! Generated in adata.obsm['X_pca'], adata.uns['pca'] and adata.varm['PCs']\n"]],[["Perform Louvain clustering","_____no_output_____"]],[["st.pp.neighbors(data,n_neighbors=25)","D:\\Anaconda3\\envs\\test2\\lib\\site-packages\\umap_learn-0.4.3-py3.8.egg\\umap\\spectral.py:4: NumbaDeprecationWarning: No direct replacement for 'numba.targets' available. Visit https://gitter.im/numba/numba-dev to request help. Thanks!\n import numba.targets\n"],["st.tl.clustering.louvain(data)","Applying Louvain clustering ...\nLouvain clustering is done! 
The labels are stored in adata.obs['louvain']\n"],["st.pl.cluster_plot(data,use_label=\"louvain\",spot_size=10)","_____no_output_____"]]],"string":"[\n [\n [\n \"# Working with SeqFish data\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import stlearn as st\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The data is downloaded from https://www.spatialomics.org/SpatialDB/download.php\\n\\n| Technique | PMID | Title | Expression | SV genes|\\n| ----------- | ----------- | ----------- | ----------- | ----------- |\\n|seqFISH|30911168|Transcriptome-scale super-resolved imaging in tissues by RNA seqFISH+\\tseqfish_30911168.tar.gz|seqfish_30911168_SVG.tar.gz\\n\\nRead SeqFish data and we select field 5.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"data = st.ReadSeqFish(count_matrix_file=\\\"../Downloads/seqfish_30911168/cortex_svz_counts.matrix\\\",\\n spatial_file=\\\"../Downloads/seqfish_30911168/cortex_svz_cellcentroids.csv\\\",\\n field=5)\",\n \"D:\\\\Anaconda3\\\\envs\\\\test2\\\\lib\\\\site-packages\\\\anndata-0.7.3-py3.8.egg\\\\anndata\\\\_core\\\\anndata.py:119: ImplicitModificationWarning: Transforming to str index.\\n warnings.warn(\\\"Transforming to str index.\\\", ImplicitModificationWarning)\\n\"\n ]\n ],\n [\n [\n \"Quality checking for the data\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"st.pl.QC_plot(data)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Plot gene Nr4a1\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"st.pl.gene_plot(data,genes=\\\"Nr4a1\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Running Preprocessing for MERFISH data\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"st.pp.filter_genes(data,min_cells=3)\\nst.pp.normalize_total(data)\\nst.pp.log1p(data)\\nst.pp.scale(data)\",\n \"Normalization step is finished in adata.X\\nLog transformation step is finished in adata.X\\nScale step is finished in adata.X\\n\"\n ]\n ],\n [\n [\n \"Running PCA to reduce the dimensions to 50\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"st.em.run_pca(data,n_comps=50,random_state=0)\",\n \"PCA is done! Generated in adata.obsm['X_pca'], adata.uns['pca'] and adata.varm['PCs']\\n\"\n ]\n ],\n [\n [\n \"Perform Louvain clustering\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"st.pp.neighbors(data,n_neighbors=25)\",\n \"D:\\\\Anaconda3\\\\envs\\\\test2\\\\lib\\\\site-packages\\\\umap_learn-0.4.3-py3.8.egg\\\\umap\\\\spectral.py:4: NumbaDeprecationWarning: No direct replacement for 'numba.targets' available. Visit https://gitter.im/numba/numba-dev to request help. Thanks!\\n import numba.targets\\n\"\n ],\n [\n \"st.tl.clustering.louvain(data)\",\n \"Applying Louvain clustering ...\\nLouvain clustering is done! 
The labels are stored in adata.obs['louvain']\\n\"\n ],\n [\n \"st.pl.cluster_plot(data,use_label=\\\"louvain\\\",spot_size=10)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459058,"cells":{"hexsha":{"kind":"string","value":"e7ef6ae8df756935dc452726afc0bc7ecf00b7a4"},"size":{"kind":"number","value":379554,"string":"379,554"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"section-04-research-and-development/02-machine-learning-pipeline-feature-engineering.ipynb"},"max_stars_repo_name":{"kind":"string","value":"chauthinh/machine-learning-deployment"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ac0dd21ebfc374bebe4ea1ac84a481cfa7c056a0"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"section-04-research-and-development/02-machine-learning-pipeline-feature-engineering.ipynb"},"max_issues_repo_name":{"kind":"string","value":"chauthinh/machine-learning-deployment"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ac0dd21ebfc374bebe4ea1ac84a481cfa7c056a0"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"section-04-research-and-development/02-machine-learning-pipeline-feature-engineering.ipynb"},"max_forks_repo_name":{"kind":"string","value":"chauthinh/machine-learning-deployment"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ac0dd21ebfc374bebe4ea1ac84a481cfa7c056a0"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":120.8385864374,"string":"120.838586"},"max_line_length":{"kind":"number","value":11760,"string":"11,760"},"alphanum_fraction":{"kind":"number","value":0.8248101719,"string":"0.82481"},"cells":{"kind":"list like","value":[[["# Machine Learning Pipeline - Feature Engineering\n\nIn the following notebooks, we will go through the implementation of each one of the steps in the 
Machine Learning Pipeline. \n\nWe will discuss:\n\n1. Data Analysis\n2. **Feature Engineering**\n3. Feature Selection\n4. Model Training\n5. Obtaining Predictions / Scoring\n\n\nWe will use the house price dataset available on [Kaggle.com](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data). See below for more details.\n\n===================================================================================================\n\n## Predicting Sale Price of Houses\n\nThe aim of the project is to build a machine learning model to predict the sale price of homes based on different explanatory variables describing aspects of residential houses.\n\n\n### Why is this important? \n\nPredicting house prices is useful to identify fruitful investments, or to determine whether the price advertised for a house is over or under-estimated.\n\n\n### What is the objective of the machine learning model?\n\nWe aim to minimise the difference between the real price and the price estimated by our model. We will evaluate model performance with the:\n\n1. mean squared error (mse)\n2. root squared of the mean squared error (rmse)\n3. r-squared (r2).\n\n\n### How do I download the dataset?\n\n- Visit the [Kaggle Website](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data).\n\n- Remember to **log in**\n\n- Scroll down to the bottom of the page, and click on the link **'train.csv'**, and then click the 'download' blue button towards the right of the screen, to download the dataset.\n\n- The download the file called **'test.csv'** and save it in the directory with the notebooks.\n\n\n**Note the following:**\n\n- You need to be logged in to Kaggle in order to download the datasets.\n- You need to accept the terms and conditions of the competition to download the dataset\n- If you save the file to the directory with the jupyter notebook, then you can run the code as it is written here.","_____no_output_____"],["# Reproducibility: Setting the seed\n\nWith the aim to ensure reproducibility between runs of the same notebook, but also between the research and production environment, for each step that includes some element of randomness, it is extremely important that we **set the seed**.","_____no_output_____"]],[["# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# for plotting\nimport matplotlib.pyplot as plt\n\n# for the yeo-johnson transformation\nimport scipy.stats as stats\n\n# to divide train and test set\nfrom sklearn.model_selection import train_test_split\n\n# feature scaling\nfrom sklearn.preprocessing import MinMaxScaler\n\n# to save the trained scaler class\nimport joblib\n\n# to visualise al the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)","_____no_output_____"],["# load dataset\ndata = pd.read_csv('train.csv')\n\n# rows and columns of the data\nprint(data.shape)\n\n# visualise the dataset\ndata.head()","(1460, 81)\n"]],[["# Separate dataset into train and test\n\nIt is important to separate our data intro training and testing set. \n\nWhen we engineer features, some techniques learn parameters from data. It is important to learn these parameters only from the train set. 
This is to avoid over-fitting.\n\nOur feature engineering techniques will learn:\n\n- mean\n- mode\n- exponents for the yeo-johnson\n- category frequency\n- and category to number mappings\n\nfrom the train set.\n\n**Separating the data into train and test involves randomness, therefore, we need to set the seed.**","_____no_output_____"]],[["# Let's separate into train and test set\n# Remember to set the seed (random_state for this sklearn function)\n\nX_train, X_test, y_train, y_test = train_test_split(\n data.drop(['Id', 'SalePrice'], axis=1), # predictive variables\n data['SalePrice'], # target\n test_size=0.1, # portion of dataset to allocate to test set\n random_state=0, # we are setting the seed here\n)\n\nX_train.shape, X_test.shape","_____no_output_____"]],[["# Feature Engineering\n\nIn the following cells, we will engineer the variables of the House Price Dataset so that we tackle:\n\n1. Missing values\n2. Temporal variables\n3. Non-Gaussian distributed variables\n4. Categorical variables: remove rare labels\n5. Categorical variables: convert strings to numbers\n5. Put the variables in a similar scale","_____no_output_____"],["## Target\n\nWe apply the logarithm","_____no_output_____"]],[["y_train = np.log(y_train)\ny_test = np.log(y_test)","_____no_output_____"]],[["## Missing values\n\n### Categorical variables\n\nWe will replace missing values with the string \"missing\" in those variables with a lot of missing data. \n\nAlternatively, we will replace missing data with the most frequent category in those variables that contain fewer observations without values. \n\nThis is common practice.","_____no_output_____"]],[["# let's identify the categorical variables\n# we will capture those of type object\n\ncat_vars = [var for var in data.columns if data[var].dtype == 'O']\n\n# MSSubClass is also categorical by definition, despite its numeric values\n# (you can find the definitions of the variables in the data_description.txt\n# file available on Kaggle, in the same website where you downloaded the data)\n\n# lets add MSSubClass to the list of categorical variables\ncat_vars = cat_vars + ['MSSubClass']\n\n# cast all variables as categorical\nX_train[cat_vars] = X_train[cat_vars].astype('O')\nX_test[cat_vars] = X_test[cat_vars].astype('O')\n\n# number of categorical variables\nlen(cat_vars)","_____no_output_____"],["# make a list of the categorical variables that contain missing values\n\ncat_vars_with_na = [\n var for var in cat_vars\n if X_train[var].isnull().sum() > 0\n]\n\n# print percentage of missing values per variable\nX_train[cat_vars_with_na ].isnull().mean().sort_values(ascending=False)","_____no_output_____"],["# variables to impute with the string missing\nwith_string_missing = [\n var for var in cat_vars_with_na if X_train[var].isnull().mean() > 0.1]\n\n# variables to impute with the most frequent category\nwith_frequent_category = [\n var for var in cat_vars_with_na if X_train[var].isnull().mean() < 0.1]","_____no_output_____"],["with_string_missing","_____no_output_____"],["# replace missing values with new label: \"Missing\"\n\nX_train[with_string_missing] = X_train[with_string_missing].fillna('Missing')\nX_test[with_string_missing] = X_test[with_string_missing].fillna('Missing')","_____no_output_____"],["for var in with_frequent_category:\n \n # there can be more than 1 mode in a variable\n # we take the first one with [0] \n mode = X_train[var].mode()[0]\n \n print(var, mode)\n \n X_train[var].fillna(mode, inplace=True)\n X_test[var].fillna(mode, 
inplace=True)","MasVnrType None\nBsmtQual TA\nBsmtCond TA\nBsmtExposure No\nBsmtFinType1 Unf\nBsmtFinType2 Unf\nElectrical SBrkr\nGarageType Attchd\nGarageFinish Unf\nGarageQual TA\nGarageCond TA\n"],["# check that we have no missing information in the engineered variables\n\nX_train[cat_vars_with_na].isnull().sum()","_____no_output_____"],["# check that test set does not contain null values in the engineered variables\n\n[var for var in cat_vars_with_na if X_test[var].isnull().sum() > 0]","_____no_output_____"]],[["### Numerical variables\n\nTo engineer missing values in numerical variables, we will:\n\n- add a binary missing indicator variable\n- and then replace the missing values in the original variable with the mean","_____no_output_____"]],[["# now let's identify the numerical variables\n\nnum_vars = [\n var for var in X_train.columns if var not in cat_vars and var != 'SalePrice'\n]\n\n# number of numerical variables\nlen(num_vars)","_____no_output_____"],["# make a list with the numerical variables that contain missing values\nvars_with_na = [\n var for var in num_vars\n if X_train[var].isnull().sum() > 0\n]\n\n# print percentage of missing values per variable\nX_train[vars_with_na].isnull().mean()","_____no_output_____"],["# replace missing values as we described above\n\nfor var in vars_with_na:\n\n # calculate the mean using the train set\n mean_val = X_train[var].mean()\n \n print(var, mean_val)\n\n # add binary missing indicator (in train and test)\n X_train[var + '_na'] = np.where(X_train[var].isnull(), 1, 0)\n X_test[var + '_na'] = np.where(X_test[var].isnull(), 1, 0)\n\n # replace missing values by the mean\n # (in train and test)\n X_train[var].fillna(mean_val, inplace=True)\n X_test[var].fillna(mean_val, inplace=True)\n\n# check that we have no more missing values in the engineered variables\nX_train[vars_with_na].isnull().sum()","LotFrontage 69.87974098057354\nMasVnrArea 103.7974006116208\nGarageYrBlt 1978.2959677419356\n"],["# check that test set does not contain null values in the engineered variables\n\n[var for var in vars_with_na if X_test[var].isnull().sum() > 0]","_____no_output_____"],["# check the binary missing indicator variables\n\nX_train[['LotFrontage_na', 'MasVnrArea_na', 'GarageYrBlt_na']].head()","_____no_output_____"]],[["## Temporal variables\n\n### Capture elapsed time\n\nWe learned in the previous notebook, that there are 4 variables that refer to the years in which the house or the garage were built or remodeled. 
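A brief aside on the missing-value step above before the elapsed-time calculation: the indicator-plus-mean imputation can also be expressed with scikit-learn's `SimpleImputer`. The sketch below is an alternative illustration, not the notebook's own approach, and it assumes the `X_train`, `X_test` and `vars_with_na` objects defined in the cells above:

```python
# Sketch of an equivalent imputation using scikit-learn
# (assumes X_train, X_test and vars_with_na from the cells above).
from sklearn.impute import SimpleImputer

# strategy='mean' replaces NaN with the column mean;
# add_indicator=True appends one binary "was missing" column per imputed
# variable, mirroring the manual var + '_na' indicators created above
imputer = SimpleImputer(strategy='mean', add_indicator=True)

# the means are learned from the train set only
imputer.fit(X_train[vars_with_na])

# transform returns numpy arrays: the imputed columns followed by the indicators
train_imputed = imputer.transform(X_train[vars_with_na])
test_imputed = imputer.transform(X_test[vars_with_na])
```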
\n\nWe will capture the time elapsed between those variables and the year in which the house was sold:","_____no_output_____"]],[["def elapsed_years(df, var):\n    # capture difference between the year variable\n    # and the year in which the house was sold\n    df[var] = df['YrSold'] - df[var]\n    return df","_____no_output_____"],["for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']:\n    X_train = elapsed_years(X_train, var)\n    X_test = elapsed_years(X_test, var)","_____no_output_____"],["# now we drop YrSold\nX_train.drop(['YrSold'], axis=1, inplace=True)\nX_test.drop(['YrSold'], axis=1, inplace=True)","_____no_output_____"]],[["## Numerical variable transformation\n\n### Logarithmic transformation\n\nIn the previous notebook, we observed that the numerical variables are not normally distributed.\n\nWe will transform the positive numerical variables with the logarithm in order to get a more Gaussian-like distribution.","_____no_output_____"]],[["for var in [\"LotFrontage\", \"1stFlrSF\", \"GrLivArea\"]:\n    X_train[var] = np.log(X_train[var])\n    X_test[var] = np.log(X_test[var])","_____no_output_____"],["# check that test set does not contain null values in the engineered variables\n[var for var in [\"LotFrontage\", \"1stFlrSF\", \"GrLivArea\"] if X_test[var].isnull().sum() > 0]","_____no_output_____"],["# same for train set\n[var for var in [\"LotFrontage\", \"1stFlrSF\", \"GrLivArea\"] if X_train[var].isnull().sum() > 0]","_____no_output_____"]],[["### Yeo-Johnson transformation\n\nWe will apply the Yeo-Johnson transformation to LotArea.","_____no_output_____"]],[["# the yeo-johnson transformation learns the best exponent to transform the variable\n# it needs to learn it from the train set: \nX_train['LotArea'], param = stats.yeojohnson(X_train['LotArea'])\n\n# and then apply the transformation to the test set with the same\n# parameter: see how this time we pass param as an argument to the\n# yeo-johnson\nX_test['LotArea'] = stats.yeojohnson(X_test['LotArea'], lmbda=param)\n\nprint(param)","-12.55283001172003\n"],["# check absence of na in the train set\n[var for var in X_train.columns if X_train[var].isnull().sum() > 0]","_____no_output_____"],["# check absence of na in the test set\n[var for var in X_test.columns if X_test[var].isnull().sum() > 0]","_____no_output_____"]],[["### Binarize skewed variables\n\nA few variables were very skewed; we will transform those into binary variables.","_____no_output_____"]],[["skewed = [\n    'BsmtFinSF2', 'LowQualFinSF', 'EnclosedPorch',\n    '3SsnPorch', 'ScreenPorch', 'MiscVal'\n]\n\nfor var in skewed:\n\n    # map the variable values into 0 and 1\n    X_train[var] = np.where(X_train[var]==0, 0, 1)\n    X_test[var] = np.where(X_test[var]==0, 0, 1)","_____no_output_____"]],[["## Categorical variables\n\n### Apply mappings\n\nThese are variables whose values have an assigned order, related to quality. 
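One practical caveat before applying the mappings in the next cells: `pandas.Series.map` silently converts any category that is absent from the mapping dictionary into NaN. The small helper below is hypothetical (it is not part of the original notebook) and assumes the `X_train` dataframe from above; it can be run once the mapping dictionaries have been defined, to confirm nothing was missed:

```python
# Hypothetical helper (not in the original notebook): check that every category
# observed in a column is covered by the mapping dictionary before .map() is used.
def check_mapping_coverage(df, var, mapping):
    # categories present in the data but absent from the mapping keys
    unmapped = set(df[var].dropna().unique()) - set(mapping.keys())
    if unmapped:
        print(f"{var}: categories with no mapping -> {unmapped}")

# example usage, once qual_mappings (next cell) has been defined:
# for var in ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond']:
#     check_mapping_coverage(X_train, var, qual_mappings)
```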
For more information, check Kaggle website.","_____no_output_____"]],[["# re-map strings to numbers, which determine quality\n\nqual_mappings = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, 'Missing': 0, 'NA': 0}\n\nqual_vars = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond',\n 'HeatingQC', 'KitchenQual', 'FireplaceQu',\n 'GarageQual', 'GarageCond',\n ]\n\nfor var in qual_vars:\n X_train[var] = X_train[var].map(qual_mappings)\n X_test[var] = X_test[var].map(qual_mappings)","_____no_output_____"],["exposure_mappings = {'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}\n\nvar = 'BsmtExposure'\n\nX_train[var] = X_train[var].map(exposure_mappings)\nX_test[var] = X_test[var].map(exposure_mappings)","_____no_output_____"],["finish_mappings = {'Missing': 0, 'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}\n\nfinish_vars = ['BsmtFinType1', 'BsmtFinType2']\n\nfor var in finish_vars:\n X_train[var] = X_train[var].map(finish_mappings)\n X_test[var] = X_test[var].map(finish_mappings)","_____no_output_____"],["garage_mappings = {'Missing': 0, 'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}\n\nvar = 'GarageFinish'\n\nX_train[var] = X_train[var].map(garage_mappings)\nX_test[var] = X_test[var].map(garage_mappings)","_____no_output_____"],["fence_mappings = {'Missing': 0, 'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}\n\nvar = 'Fence'\n\nX_train[var] = X_train[var].map(fence_mappings)\nX_test[var] = X_test[var].map(fence_mappings)","_____no_output_____"],["# check absence of na in the train set\n[var for var in X_train.columns if X_train[var].isnull().sum() > 0]","_____no_output_____"]],[["### Removing Rare Labels\n\nFor the remaining categorical variables, we will group those categories that are present in less than 1% of the observations. That is, all values of categorical variables that are shared by less than 1% of houses, well be replaced by the string \"Rare\".\n\nTo learn more about how to handle categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) in Udemy.","_____no_output_____"]],[["# capture all quality variables\n\nqual_vars = qual_vars + finish_vars + ['BsmtExposure','GarageFinish','Fence']\n\n# capture the remaining categorical variables\n# (those that we did not re-map)\n\ncat_others = [\n var for var in cat_vars if var not in qual_vars\n]\n\nlen(cat_others)","_____no_output_____"],["def find_frequent_labels(df, var, rare_perc):\n \n # function finds the labels that are shared by more than\n # a certain % of the houses in the dataset\n\n df = df.copy()\n\n tmp = df.groupby(var)[var].count() / len(df)\n\n return tmp[tmp > rare_perc].index\n\n\nfor var in cat_others:\n \n # find the frequent categories\n frequent_ls = find_frequent_labels(X_train, var, 0.01)\n \n print(var, frequent_ls)\n print()\n \n # replace rare categories by the string \"Rare\"\n X_train[var] = np.where(X_train[var].isin(\n frequent_ls), X_train[var], 'Rare')\n \n X_test[var] = np.where(X_test[var].isin(\n frequent_ls), X_test[var], 'Rare')","MSZoning Index(['FV', 'RH', 'RL', 'RM'], dtype='object', name='MSZoning')\n\nStreet Index(['Pave'], dtype='object', name='Street')\n\nAlley Index(['Grvl', 'Missing', 'Pave'], dtype='object', name='Alley')\n\nLotShape Index(['IR1', 'IR2', 'Reg'], dtype='object', name='LotShape')\n\nLandContour Index(['Bnk', 'HLS', 'Low', 'Lvl'], dtype='object', name='LandContour')\n\nUtilities Index(['AllPub'], dtype='object', name='Utilities')\n\nLotConfig 
Index(['Corner', 'CulDSac', 'FR2', 'Inside'], dtype='object', name='LotConfig')\n\nLandSlope Index(['Gtl', 'Mod'], dtype='object', name='LandSlope')\n\nNeighborhood Index(['Blmngtn', 'BrDale', 'BrkSide', 'ClearCr', 'CollgCr', 'Crawfor',\n 'Edwards', 'Gilbert', 'IDOTRR', 'MeadowV', 'Mitchel', 'NAmes', 'NWAmes',\n 'NoRidge', 'NridgHt', 'OldTown', 'SWISU', 'Sawyer', 'SawyerW',\n 'Somerst', 'StoneBr', 'Timber'],\n dtype='object', name='Neighborhood')\n\nCondition1 Index(['Artery', 'Feedr', 'Norm', 'PosN', 'RRAn'], dtype='object', name='Condition1')\n\nCondition2 Index(['Norm'], dtype='object', name='Condition2')\n\nBldgType Index(['1Fam', '2fmCon', 'Duplex', 'Twnhs', 'TwnhsE'], dtype='object', name='BldgType')\n\nHouseStyle Index(['1.5Fin', '1Story', '2Story', 'SFoyer', 'SLvl'], dtype='object', name='HouseStyle')\n\nRoofStyle Index(['Gable', 'Hip'], dtype='object', name='RoofStyle')\n\nRoofMatl Index(['CompShg'], dtype='object', name='RoofMatl')\n\nExterior1st Index(['AsbShng', 'BrkFace', 'CemntBd', 'HdBoard', 'MetalSd', 'Plywood',\n 'Stucco', 'VinylSd', 'Wd Sdng', 'WdShing'],\n dtype='object', name='Exterior1st')\n\nExterior2nd Index(['AsbShng', 'BrkFace', 'CmentBd', 'HdBoard', 'MetalSd', 'Plywood',\n 'Stucco', 'VinylSd', 'Wd Sdng', 'Wd Shng'],\n dtype='object', name='Exterior2nd')\n\nMasVnrType Index(['BrkFace', 'None', 'Stone'], dtype='object', name='MasVnrType')\n\nFoundation Index(['BrkTil', 'CBlock', 'PConc', 'Slab'], dtype='object', name='Foundation')\n\nHeating Index(['GasA', 'GasW'], dtype='object', name='Heating')\n\nCentralAir Index(['N', 'Y'], dtype='object', name='CentralAir')\n\nElectrical Index(['FuseA', 'FuseF', 'SBrkr'], dtype='object', name='Electrical')\n\nFunctional Index(['Min1', 'Min2', 'Mod', 'Typ'], dtype='object', name='Functional')\n\nGarageType Index(['Attchd', 'Basment', 'BuiltIn', 'Detchd'], dtype='object', name='GarageType')\n\nPavedDrive Index(['N', 'P', 'Y'], dtype='object', name='PavedDrive')\n\nPoolQC Index(['Missing'], dtype='object', name='PoolQC')\n\nMiscFeature Index(['Missing', 'Shed'], dtype='object', name='MiscFeature')\n\nSaleType Index(['COD', 'New', 'WD'], dtype='object', name='SaleType')\n\nSaleCondition Index(['Abnorml', 'Family', 'Normal', 'Partial'], dtype='object', name='SaleCondition')\n\nMSSubClass Int64Index([20, 30, 50, 60, 70, 75, 80, 85, 90, 120, 160, 190], dtype='int64', name='MSSubClass')\n\n"]],[["### Encoding of categorical variables\n\nNext, we need to transform the strings of the categorical variables into numbers. 
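The next cells implement this with a `replace_categories` function that prints the learned orderings. As an aside, in a deployment setting those orderings also need to be persisted, just as the scaler is saved at the end of this notebook. The following is a hypothetical variant (not the notebook's own function) that returns the mapping so it can be stored; it assumes the `X_train`, `y_train` and `cat_others` objects defined in this notebook:

```python
# Hypothetical variant of the target-guided ordinal encoding: learn the
# category -> integer mapping for one variable and return it, so the learned
# dictionaries can be saved and reused at scoring time.
import joblib
import pandas as pd

def learn_ordinal_mapping(X, y, var):
    # order the categories by their mean target value, lowest first
    tmp = pd.concat([X[var], y.rename('target')], axis=1)
    ordered = tmp.groupby(var)['target'].mean().sort_values().index
    return {category: i for i, category in enumerate(ordered)}

# example usage with the objects defined in this notebook:
# mappings = {var: learn_ordinal_mapping(X_train, y_train, var) for var in cat_others}
# joblib.dump(mappings, 'category_mappings.joblib')
```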
\n\nWe will do it so that we capture the monotonic relationship between the label and the target.\n\nTo learn more about how to encode categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) in Udemy.","_____no_output_____"]],[["# this function will assign discrete values to the strings of the variables,\n# so that the smaller value corresponds to the category that shows the smaller\n# mean house sale price\n\ndef replace_categories(train, test, y_train, var, target):\n \n tmp = pd.concat([X_train, y_train], axis=1)\n \n # order the categories in a variable from that with the lowest\n # house sale price, to that with the highest\n ordered_labels = tmp.groupby([var])[target].mean().sort_values().index\n\n # create a dictionary of ordered categories to integer values\n ordinal_label = {k: i for i, k in enumerate(ordered_labels, 0)}\n \n print(var, ordinal_label)\n print()\n\n # use the dictionary to replace the categorical strings by integers\n train[var] = train[var].map(ordinal_label)\n test[var] = test[var].map(ordinal_label)","_____no_output_____"],["for var in cat_others:\n replace_categories(X_train, X_test, y_train, var, 'SalePrice')","MSZoning {'Rare': 0, 'RM': 1, 'RH': 2, 'RL': 3, 'FV': 4}\n\nStreet {'Rare': 0, 'Pave': 1}\n\nAlley {'Grvl': 0, 'Pave': 1, 'Missing': 2}\n\nLotShape {'Reg': 0, 'IR1': 1, 'Rare': 2, 'IR2': 3}\n\nLandContour {'Bnk': 0, 'Lvl': 1, 'Low': 2, 'HLS': 3}\n\nUtilities {'Rare': 0, 'AllPub': 1}\n\nLotConfig {'Inside': 0, 'FR2': 1, 'Corner': 2, 'Rare': 3, 'CulDSac': 4}\n\nLandSlope {'Gtl': 0, 'Mod': 1, 'Rare': 2}\n\nNeighborhood {'IDOTRR': 0, 'MeadowV': 1, 'BrDale': 2, 'Edwards': 3, 'BrkSide': 4, 'OldTown': 5, 'Sawyer': 6, 'SWISU': 7, 'NAmes': 8, 'Mitchel': 9, 'SawyerW': 10, 'Rare': 11, 'NWAmes': 12, 'Gilbert': 13, 'Blmngtn': 14, 'CollgCr': 15, 'Crawfor': 16, 'ClearCr': 17, 'Somerst': 18, 'Timber': 19, 'StoneBr': 20, 'NridgHt': 21, 'NoRidge': 22}\n\nCondition1 {'Artery': 0, 'Feedr': 1, 'Norm': 2, 'RRAn': 3, 'Rare': 4, 'PosN': 5}\n\nCondition2 {'Rare': 0, 'Norm': 1}\n\nBldgType {'2fmCon': 0, 'Duplex': 1, 'Twnhs': 2, '1Fam': 3, 'TwnhsE': 4}\n\nHouseStyle {'SFoyer': 0, '1.5Fin': 1, 'Rare': 2, '1Story': 3, 'SLvl': 4, '2Story': 5}\n\nRoofStyle {'Gable': 0, 'Rare': 1, 'Hip': 2}\n\nRoofMatl {'CompShg': 0, 'Rare': 1}\n\nExterior1st {'AsbShng': 0, 'Wd Sdng': 1, 'WdShing': 2, 'MetalSd': 3, 'Stucco': 4, 'Rare': 5, 'HdBoard': 6, 'Plywood': 7, 'BrkFace': 8, 'CemntBd': 9, 'VinylSd': 10}\n\nExterior2nd {'AsbShng': 0, 'Wd Sdng': 1, 'MetalSd': 2, 'Wd Shng': 3, 'Stucco': 4, 'Rare': 5, 'HdBoard': 6, 'Plywood': 7, 'BrkFace': 8, 'CmentBd': 9, 'VinylSd': 10}\n\nMasVnrType {'Rare': 0, 'None': 1, 'BrkFace': 2, 'Stone': 3}\n\nFoundation {'Slab': 0, 'BrkTil': 1, 'CBlock': 2, 'Rare': 3, 'PConc': 4}\n\nHeating {'Rare': 0, 'GasW': 1, 'GasA': 2}\n\nCentralAir {'N': 0, 'Y': 1}\n\nElectrical {'Rare': 0, 'FuseF': 1, 'FuseA': 2, 'SBrkr': 3}\n\nFunctional {'Rare': 0, 'Min2': 1, 'Mod': 2, 'Min1': 3, 'Typ': 4}\n\nGarageType {'Rare': 0, 'Detchd': 1, 'Basment': 2, 'Attchd': 3, 'BuiltIn': 4}\n\nPavedDrive {'N': 0, 'P': 1, 'Y': 2}\n\nPoolQC {'Missing': 0, 'Rare': 1}\n\nMiscFeature {'Rare': 0, 'Shed': 1, 'Missing': 2}\n\nSaleType {'COD': 0, 'Rare': 1, 'WD': 2, 'New': 3}\n\nSaleCondition {'Rare': 0, 'Abnorml': 1, 'Family': 2, 'Normal': 3, 'Partial': 4}\n\nMSSubClass {30: 0, 'Rare': 1, 190: 2, 90: 3, 160: 4, 50: 5, 85: 6, 70: 7, 80: 8, 20: 9, 75: 10, 120: 11, 60: 12}\n\n"],["# 
check absence of na in the train set\n[var for var in X_train.columns if X_train[var].isnull().sum() > 0]","_____no_output_____"],["# check absence of na in the test set\n[var for var in X_test.columns if X_test[var].isnull().sum() > 0]","_____no_output_____"],["# let me show you what I mean by monotonic relationship\n# between labels and target\n\ndef analyse_vars(train, y_train, var):\n \n # function plots median house sale price per encoded\n # category\n \n tmp = pd.concat([X_train, np.log(y_train)], axis=1)\n \n tmp.groupby(var)['SalePrice'].median().plot.bar()\n plt.title(var)\n plt.ylim(2.2, 2.6)\n plt.ylabel('SalePrice')\n plt.show()\n \nfor var in cat_others:\n analyse_vars(X_train, y_train, var)","_____no_output_____"]],[["The monotonic relationship is particularly clear for the variables MSZoning and Neighborhood. Note how, the higher the integer that now represents the category, the higher the mean house sale price.\n\n(remember that the target is log-transformed, that is why the differences seem so small).","_____no_output_____"],["## Feature Scaling\n\nFor use in linear models, features need to be either scaled. We will scale features to the minimum and maximum values:","_____no_output_____"]],[["# create scaler\nscaler = MinMaxScaler()\n\n# fit the scaler to the train set\nscaler.fit(X_train) \n\n# transform the train and test set\n\n# sklearn returns numpy arrays, so we wrap the\n# array with a pandas dataframe\n\nX_train = pd.DataFrame(\n scaler.transform(X_train),\n columns=X_train.columns\n)\n\nX_test = pd.DataFrame(\n scaler.transform(X_test),\n columns=X_train.columns\n)","_____no_output_____"],["X_train.head()","_____no_output_____"],["# let's now save the train and test sets for the next notebook!\n\nX_train.to_csv('xtrain.csv', index=False)\nX_test.to_csv('xtest.csv', index=False)\n\ny_train.to_csv('ytrain.csv', index=False)\ny_test.to_csv('ytest.csv', index=False)","_____no_output_____"],["# now let's save the scaler\n\njoblib.dump(scaler, 'minmax_scaler.joblib') ","_____no_output_____"]],[["That concludes the feature engineering section.\n\n# Additional Resources\n\n- [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) - Online Course\n- [Packt Feature Engineering Cookbook](https://www.packtpub.com/data/python-feature-engineering-cookbook) - Book\n- [Feature Engineering for Machine Learning: A comprehensive Overview](https://trainindata.medium.com/feature-engineering-for-machine-learning-a-comprehensive-overview-a7ad04c896f8) - Article\n- [Practical Code Implementations of Feature Engineering for Machine Learning with Python](https://towardsdatascience.com/practical-code-implementations-of-feature-engineering-for-machine-learning-with-python-f13b953d4bcd) - Article","_____no_output_____"]]],"string":"[\n [\n [\n \"# Machine Learning Pipeline - Feature Engineering\\n\\nIn the following notebooks, we will go through the implementation of each one of the steps in the Machine Learning Pipeline. \\n\\nWe will discuss:\\n\\n1. Data Analysis\\n2. **Feature Engineering**\\n3. Feature Selection\\n4. Model Training\\n5. Obtaining Predictions / Scoring\\n\\n\\nWe will use the house price dataset available on [Kaggle.com](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data). 
See below for more details.\\n\\n===================================================================================================\\n\\n## Predicting Sale Price of Houses\\n\\nThe aim of the project is to build a machine learning model to predict the sale price of homes based on different explanatory variables describing aspects of residential houses.\\n\\n\\n### Why is this important? \\n\\nPredicting house prices is useful to identify fruitful investments, or to determine whether the price advertised for a house is over or under-estimated.\\n\\n\\n### What is the objective of the machine learning model?\\n\\nWe aim to minimise the difference between the real price and the price estimated by our model. We will evaluate model performance with the:\\n\\n1. mean squared error (mse)\\n2. root squared of the mean squared error (rmse)\\n3. r-squared (r2).\\n\\n\\n### How do I download the dataset?\\n\\n- Visit the [Kaggle Website](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data).\\n\\n- Remember to **log in**\\n\\n- Scroll down to the bottom of the page, and click on the link **'train.csv'**, and then click the 'download' blue button towards the right of the screen, to download the dataset.\\n\\n- The download the file called **'test.csv'** and save it in the directory with the notebooks.\\n\\n\\n**Note the following:**\\n\\n- You need to be logged in to Kaggle in order to download the datasets.\\n- You need to accept the terms and conditions of the competition to download the dataset\\n- If you save the file to the directory with the jupyter notebook, then you can run the code as it is written here.\",\n \"_____no_output_____\"\n ],\n [\n \"# Reproducibility: Setting the seed\\n\\nWith the aim to ensure reproducibility between runs of the same notebook, but also between the research and production environment, for each step that includes some element of randomness, it is extremely important that we **set the seed**.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# to handle datasets\\nimport pandas as pd\\nimport numpy as np\\n\\n# for plotting\\nimport matplotlib.pyplot as plt\\n\\n# for the yeo-johnson transformation\\nimport scipy.stats as stats\\n\\n# to divide train and test set\\nfrom sklearn.model_selection import train_test_split\\n\\n# feature scaling\\nfrom sklearn.preprocessing import MinMaxScaler\\n\\n# to save the trained scaler class\\nimport joblib\\n\\n# to visualise al the columns in the dataframe\\npd.pandas.set_option('display.max_columns', None)\",\n \"_____no_output_____\"\n ],\n [\n \"# load dataset\\ndata = pd.read_csv('train.csv')\\n\\n# rows and columns of the data\\nprint(data.shape)\\n\\n# visualise the dataset\\ndata.head()\",\n \"(1460, 81)\\n\"\n ]\n ],\n [\n [\n \"# Separate dataset into train and test\\n\\nIt is important to separate our data intro training and testing set. \\n\\nWhen we engineer features, some techniques learn parameters from data. It is important to learn these parameters only from the train set. 
This is to avoid over-fitting.\\n\\nOur feature engineering techniques will learn:\\n\\n- mean\\n- mode\\n- exponents for the yeo-johnson\\n- category frequency\\n- and category to number mappings\\n\\nfrom the train set.\\n\\n**Separating the data into train and test involves randomness, therefore, we need to set the seed.**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Let's separate into train and test set\\n# Remember to set the seed (random_state for this sklearn function)\\n\\nX_train, X_test, y_train, y_test = train_test_split(\\n data.drop(['Id', 'SalePrice'], axis=1), # predictive variables\\n data['SalePrice'], # target\\n test_size=0.1, # portion of dataset to allocate to test set\\n random_state=0, # we are setting the seed here\\n)\\n\\nX_train.shape, X_test.shape\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Feature Engineering\\n\\nIn the following cells, we will engineer the variables of the House Price Dataset so that we tackle:\\n\\n1. Missing values\\n2. Temporal variables\\n3. Non-Gaussian distributed variables\\n4. Categorical variables: remove rare labels\\n5. Categorical variables: convert strings to numbers\\n5. Put the variables in a similar scale\",\n \"_____no_output_____\"\n ],\n [\n \"## Target\\n\\nWe apply the logarithm\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"y_train = np.log(y_train)\\ny_test = np.log(y_test)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Missing values\\n\\n### Categorical variables\\n\\nWe will replace missing values with the string \\\"missing\\\" in those variables with a lot of missing data. \\n\\nAlternatively, we will replace missing data with the most frequent category in those variables that contain fewer observations without values. \\n\\nThis is common practice.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# let's identify the categorical variables\\n# we will capture those of type object\\n\\ncat_vars = [var for var in data.columns if data[var].dtype == 'O']\\n\\n# MSSubClass is also categorical by definition, despite its numeric values\\n# (you can find the definitions of the variables in the data_description.txt\\n# file available on Kaggle, in the same website where you downloaded the data)\\n\\n# lets add MSSubClass to the list of categorical variables\\ncat_vars = cat_vars + ['MSSubClass']\\n\\n# cast all variables as categorical\\nX_train[cat_vars] = X_train[cat_vars].astype('O')\\nX_test[cat_vars] = X_test[cat_vars].astype('O')\\n\\n# number of categorical variables\\nlen(cat_vars)\",\n \"_____no_output_____\"\n ],\n [\n \"# make a list of the categorical variables that contain missing values\\n\\ncat_vars_with_na = [\\n var for var in cat_vars\\n if X_train[var].isnull().sum() > 0\\n]\\n\\n# print percentage of missing values per variable\\nX_train[cat_vars_with_na ].isnull().mean().sort_values(ascending=False)\",\n \"_____no_output_____\"\n ],\n [\n \"# variables to impute with the string missing\\nwith_string_missing = [\\n var for var in cat_vars_with_na if X_train[var].isnull().mean() > 0.1]\\n\\n# variables to impute with the most frequent category\\nwith_frequent_category = [\\n var for var in cat_vars_with_na if X_train[var].isnull().mean() < 0.1]\",\n \"_____no_output_____\"\n ],\n [\n \"with_string_missing\",\n \"_____no_output_____\"\n ],\n [\n \"# replace missing values with new label: \\\"Missing\\\"\\n\\nX_train[with_string_missing] = X_train[with_string_missing].fillna('Missing')\\nX_test[with_string_missing] = X_test[with_string_missing].fillna('Missing')\",\n 
\"_____no_output_____\"\n ],\n [\n \"for var in with_frequent_category:\\n \\n # there can be more than 1 mode in a variable\\n # we take the first one with [0] \\n mode = X_train[var].mode()[0]\\n \\n print(var, mode)\\n \\n X_train[var].fillna(mode, inplace=True)\\n X_test[var].fillna(mode, inplace=True)\",\n \"MasVnrType None\\nBsmtQual TA\\nBsmtCond TA\\nBsmtExposure No\\nBsmtFinType1 Unf\\nBsmtFinType2 Unf\\nElectrical SBrkr\\nGarageType Attchd\\nGarageFinish Unf\\nGarageQual TA\\nGarageCond TA\\n\"\n ],\n [\n \"# check that we have no missing information in the engineered variables\\n\\nX_train[cat_vars_with_na].isnull().sum()\",\n \"_____no_output_____\"\n ],\n [\n \"# check that test set does not contain null values in the engineered variables\\n\\n[var for var in cat_vars_with_na if X_test[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Numerical variables\\n\\nTo engineer missing values in numerical variables, we will:\\n\\n- add a binary missing indicator variable\\n- and then replace the missing values in the original variable with the mean\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# now let's identify the numerical variables\\n\\nnum_vars = [\\n var for var in X_train.columns if var not in cat_vars and var != 'SalePrice'\\n]\\n\\n# number of numerical variables\\nlen(num_vars)\",\n \"_____no_output_____\"\n ],\n [\n \"# make a list with the numerical variables that contain missing values\\nvars_with_na = [\\n var for var in num_vars\\n if X_train[var].isnull().sum() > 0\\n]\\n\\n# print percentage of missing values per variable\\nX_train[vars_with_na].isnull().mean()\",\n \"_____no_output_____\"\n ],\n [\n \"# replace missing values as we described above\\n\\nfor var in vars_with_na:\\n\\n # calculate the mean using the train set\\n mean_val = X_train[var].mean()\\n \\n print(var, mean_val)\\n\\n # add binary missing indicator (in train and test)\\n X_train[var + '_na'] = np.where(X_train[var].isnull(), 1, 0)\\n X_test[var + '_na'] = np.where(X_test[var].isnull(), 1, 0)\\n\\n # replace missing values by the mean\\n # (in train and test)\\n X_train[var].fillna(mean_val, inplace=True)\\n X_test[var].fillna(mean_val, inplace=True)\\n\\n# check that we have no more missing values in the engineered variables\\nX_train[vars_with_na].isnull().sum()\",\n \"LotFrontage 69.87974098057354\\nMasVnrArea 103.7974006116208\\nGarageYrBlt 1978.2959677419356\\n\"\n ],\n [\n \"# check that test set does not contain null values in the engineered variables\\n\\n[var for var in vars_with_na if X_test[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ],\n [\n \"# check the binary missing indicator variables\\n\\nX_train[['LotFrontage_na', 'MasVnrArea_na', 'GarageYrBlt_na']].head()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Temporal variables\\n\\n### Capture elapsed time\\n\\nWe learned in the previous notebook, that there are 4 variables that refer to the years in which the house or the garage were built or remodeled. 
\\n\\nWe will capture the time elapsed between those variables and the year in which the house was sold:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def elapsed_years(df, var):\\n # capture difference between the year variable\\n # and the year in which the house was sold\\n df[var] = df['YrSold'] - df[var]\\n return df\",\n \"_____no_output_____\"\n ],\n [\n \"for var in ['YearBuilt', 'YearRemodAdd', 'GarageYrBlt']:\\n X_train = elapsed_years(X_train, var)\\n X_test = elapsed_years(X_test, var)\",\n \"_____no_output_____\"\n ],\n [\n \"# now we drop YrSold\\nX_train.drop(['YrSold'], axis=1, inplace=True)\\nX_test.drop(['YrSold'], axis=1, inplace=True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Numerical variable transformation\\n\\n### Logarithmic transformation\\n\\nIn the previous notebook, we observed that the numerical variables are not normally distributed.\\n\\nWe will transform with the logarightm the positive numerical variables in order to get a more Gaussian-like distribution.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"for var in [\\\"LotFrontage\\\", \\\"1stFlrSF\\\", \\\"GrLivArea\\\"]:\\n X_train[var] = np.log(X_train[var])\\n X_test[var] = np.log(X_test[var])\",\n \"_____no_output_____\"\n ],\n [\n \"# check that test set does not contain null values in the engineered variables\\n[var for var in [\\\"LotFrontage\\\", \\\"1stFlrSF\\\", \\\"GrLivArea\\\"] if X_test[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ],\n [\n \"# same for train set\\n[var for var in [\\\"LotFrontage\\\", \\\"1stFlrSF\\\", \\\"GrLivArea\\\"] if X_train[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Yeo-Johnson transformation\\n\\nWe will apply the Yeo-Johnson transformation to LotArea.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# the yeo-johnson transformation learns the best exponent to transform the variable\\n# it needs to learn it from the train set: \\nX_train['LotArea'], param = stats.yeojohnson(X_train['LotArea'])\\n\\n# and then apply the transformation to the test set with the same\\n# parameter: see who this time we pass param as argument to the \\n# yeo-johnson\\nX_test['LotArea'] = stats.yeojohnson(X_test['LotArea'], lmbda=param)\\n\\nprint(param)\",\n \"-12.55283001172003\\n\"\n ],\n [\n \"# check absence of na in the train set\\n[var for var in X_train.columns if X_train[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ],\n [\n \"# check absence of na in the test set\\n[var for var in X_train.columns if X_test[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Binarize skewed variables\\n\\nThere were a few variables very skewed, we would transform those into binary variables.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"skewed = [\\n 'BsmtFinSF2', 'LowQualFinSF', 'EnclosedPorch',\\n '3SsnPorch', 'ScreenPorch', 'MiscVal'\\n]\\n\\nfor var in skewed:\\n \\n # map the variable values into 0 and 1\\n X_train[var] = np.where(X_train[var]==0, 0, 1)\\n X_test[var] = np.where(X_test[var]==0, 0, 1)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Categorical variables\\n\\n### Apply mappings\\n\\nThese are variables which values have an assigned order, related to quality. 
For more information, check Kaggle website.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# re-map strings to numbers, which determine quality\\n\\nqual_mappings = {'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5, 'Missing': 0, 'NA': 0}\\n\\nqual_vars = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond',\\n 'HeatingQC', 'KitchenQual', 'FireplaceQu',\\n 'GarageQual', 'GarageCond',\\n ]\\n\\nfor var in qual_vars:\\n X_train[var] = X_train[var].map(qual_mappings)\\n X_test[var] = X_test[var].map(qual_mappings)\",\n \"_____no_output_____\"\n ],\n [\n \"exposure_mappings = {'No': 1, 'Mn': 2, 'Av': 3, 'Gd': 4}\\n\\nvar = 'BsmtExposure'\\n\\nX_train[var] = X_train[var].map(exposure_mappings)\\nX_test[var] = X_test[var].map(exposure_mappings)\",\n \"_____no_output_____\"\n ],\n [\n \"finish_mappings = {'Missing': 0, 'NA': 0, 'Unf': 1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ': 5, 'GLQ': 6}\\n\\nfinish_vars = ['BsmtFinType1', 'BsmtFinType2']\\n\\nfor var in finish_vars:\\n X_train[var] = X_train[var].map(finish_mappings)\\n X_test[var] = X_test[var].map(finish_mappings)\",\n \"_____no_output_____\"\n ],\n [\n \"garage_mappings = {'Missing': 0, 'NA': 0, 'Unf': 1, 'RFn': 2, 'Fin': 3}\\n\\nvar = 'GarageFinish'\\n\\nX_train[var] = X_train[var].map(garage_mappings)\\nX_test[var] = X_test[var].map(garage_mappings)\",\n \"_____no_output_____\"\n ],\n [\n \"fence_mappings = {'Missing': 0, 'NA': 0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv': 4}\\n\\nvar = 'Fence'\\n\\nX_train[var] = X_train[var].map(fence_mappings)\\nX_test[var] = X_test[var].map(fence_mappings)\",\n \"_____no_output_____\"\n ],\n [\n \"# check absence of na in the train set\\n[var for var in X_train.columns if X_train[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Removing Rare Labels\\n\\nFor the remaining categorical variables, we will group those categories that are present in less than 1% of the observations. 
That is, all values of categorical variables that are shared by less than 1% of houses, well be replaced by the string \\\"Rare\\\".\\n\\nTo learn more about how to handle categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) in Udemy.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# capture all quality variables\\n\\nqual_vars = qual_vars + finish_vars + ['BsmtExposure','GarageFinish','Fence']\\n\\n# capture the remaining categorical variables\\n# (those that we did not re-map)\\n\\ncat_others = [\\n var for var in cat_vars if var not in qual_vars\\n]\\n\\nlen(cat_others)\",\n \"_____no_output_____\"\n ],\n [\n \"def find_frequent_labels(df, var, rare_perc):\\n \\n # function finds the labels that are shared by more than\\n # a certain % of the houses in the dataset\\n\\n df = df.copy()\\n\\n tmp = df.groupby(var)[var].count() / len(df)\\n\\n return tmp[tmp > rare_perc].index\\n\\n\\nfor var in cat_others:\\n \\n # find the frequent categories\\n frequent_ls = find_frequent_labels(X_train, var, 0.01)\\n \\n print(var, frequent_ls)\\n print()\\n \\n # replace rare categories by the string \\\"Rare\\\"\\n X_train[var] = np.where(X_train[var].isin(\\n frequent_ls), X_train[var], 'Rare')\\n \\n X_test[var] = np.where(X_test[var].isin(\\n frequent_ls), X_test[var], 'Rare')\",\n \"MSZoning Index(['FV', 'RH', 'RL', 'RM'], dtype='object', name='MSZoning')\\n\\nStreet Index(['Pave'], dtype='object', name='Street')\\n\\nAlley Index(['Grvl', 'Missing', 'Pave'], dtype='object', name='Alley')\\n\\nLotShape Index(['IR1', 'IR2', 'Reg'], dtype='object', name='LotShape')\\n\\nLandContour Index(['Bnk', 'HLS', 'Low', 'Lvl'], dtype='object', name='LandContour')\\n\\nUtilities Index(['AllPub'], dtype='object', name='Utilities')\\n\\nLotConfig Index(['Corner', 'CulDSac', 'FR2', 'Inside'], dtype='object', name='LotConfig')\\n\\nLandSlope Index(['Gtl', 'Mod'], dtype='object', name='LandSlope')\\n\\nNeighborhood Index(['Blmngtn', 'BrDale', 'BrkSide', 'ClearCr', 'CollgCr', 'Crawfor',\\n 'Edwards', 'Gilbert', 'IDOTRR', 'MeadowV', 'Mitchel', 'NAmes', 'NWAmes',\\n 'NoRidge', 'NridgHt', 'OldTown', 'SWISU', 'Sawyer', 'SawyerW',\\n 'Somerst', 'StoneBr', 'Timber'],\\n dtype='object', name='Neighborhood')\\n\\nCondition1 Index(['Artery', 'Feedr', 'Norm', 'PosN', 'RRAn'], dtype='object', name='Condition1')\\n\\nCondition2 Index(['Norm'], dtype='object', name='Condition2')\\n\\nBldgType Index(['1Fam', '2fmCon', 'Duplex', 'Twnhs', 'TwnhsE'], dtype='object', name='BldgType')\\n\\nHouseStyle Index(['1.5Fin', '1Story', '2Story', 'SFoyer', 'SLvl'], dtype='object', name='HouseStyle')\\n\\nRoofStyle Index(['Gable', 'Hip'], dtype='object', name='RoofStyle')\\n\\nRoofMatl Index(['CompShg'], dtype='object', name='RoofMatl')\\n\\nExterior1st Index(['AsbShng', 'BrkFace', 'CemntBd', 'HdBoard', 'MetalSd', 'Plywood',\\n 'Stucco', 'VinylSd', 'Wd Sdng', 'WdShing'],\\n dtype='object', name='Exterior1st')\\n\\nExterior2nd Index(['AsbShng', 'BrkFace', 'CmentBd', 'HdBoard', 'MetalSd', 'Plywood',\\n 'Stucco', 'VinylSd', 'Wd Sdng', 'Wd Shng'],\\n dtype='object', name='Exterior2nd')\\n\\nMasVnrType Index(['BrkFace', 'None', 'Stone'], dtype='object', name='MasVnrType')\\n\\nFoundation Index(['BrkTil', 'CBlock', 'PConc', 'Slab'], dtype='object', name='Foundation')\\n\\nHeating Index(['GasA', 'GasW'], dtype='object', name='Heating')\\n\\nCentralAir Index(['N', 'Y'], dtype='object', name='CentralAir')\\n\\nElectrical 
Index(['FuseA', 'FuseF', 'SBrkr'], dtype='object', name='Electrical')\\n\\nFunctional Index(['Min1', 'Min2', 'Mod', 'Typ'], dtype='object', name='Functional')\\n\\nGarageType Index(['Attchd', 'Basment', 'BuiltIn', 'Detchd'], dtype='object', name='GarageType')\\n\\nPavedDrive Index(['N', 'P', 'Y'], dtype='object', name='PavedDrive')\\n\\nPoolQC Index(['Missing'], dtype='object', name='PoolQC')\\n\\nMiscFeature Index(['Missing', 'Shed'], dtype='object', name='MiscFeature')\\n\\nSaleType Index(['COD', 'New', 'WD'], dtype='object', name='SaleType')\\n\\nSaleCondition Index(['Abnorml', 'Family', 'Normal', 'Partial'], dtype='object', name='SaleCondition')\\n\\nMSSubClass Int64Index([20, 30, 50, 60, 70, 75, 80, 85, 90, 120, 160, 190], dtype='int64', name='MSSubClass')\\n\\n\"\n ]\n ],\n [\n [\n \"### Encoding of categorical variables\\n\\nNext, we need to transform the strings of the categorical variables into numbers. \\n\\nWe will do it so that we capture the monotonic relationship between the label and the target.\\n\\nTo learn more about how to encode categorical variables visit our course [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) in Udemy.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# this function will assign discrete values to the strings of the variables,\\n# so that the smaller value corresponds to the category that shows the smaller\\n# mean house sale price\\n\\ndef replace_categories(train, test, y_train, var, target):\\n \\n tmp = pd.concat([X_train, y_train], axis=1)\\n \\n # order the categories in a variable from that with the lowest\\n # house sale price, to that with the highest\\n ordered_labels = tmp.groupby([var])[target].mean().sort_values().index\\n\\n # create a dictionary of ordered categories to integer values\\n ordinal_label = {k: i for i, k in enumerate(ordered_labels, 0)}\\n \\n print(var, ordinal_label)\\n print()\\n\\n # use the dictionary to replace the categorical strings by integers\\n train[var] = train[var].map(ordinal_label)\\n test[var] = test[var].map(ordinal_label)\",\n \"_____no_output_____\"\n ],\n [\n \"for var in cat_others:\\n replace_categories(X_train, X_test, y_train, var, 'SalePrice')\",\n \"MSZoning {'Rare': 0, 'RM': 1, 'RH': 2, 'RL': 3, 'FV': 4}\\n\\nStreet {'Rare': 0, 'Pave': 1}\\n\\nAlley {'Grvl': 0, 'Pave': 1, 'Missing': 2}\\n\\nLotShape {'Reg': 0, 'IR1': 1, 'Rare': 2, 'IR2': 3}\\n\\nLandContour {'Bnk': 0, 'Lvl': 1, 'Low': 2, 'HLS': 3}\\n\\nUtilities {'Rare': 0, 'AllPub': 1}\\n\\nLotConfig {'Inside': 0, 'FR2': 1, 'Corner': 2, 'Rare': 3, 'CulDSac': 4}\\n\\nLandSlope {'Gtl': 0, 'Mod': 1, 'Rare': 2}\\n\\nNeighborhood {'IDOTRR': 0, 'MeadowV': 1, 'BrDale': 2, 'Edwards': 3, 'BrkSide': 4, 'OldTown': 5, 'Sawyer': 6, 'SWISU': 7, 'NAmes': 8, 'Mitchel': 9, 'SawyerW': 10, 'Rare': 11, 'NWAmes': 12, 'Gilbert': 13, 'Blmngtn': 14, 'CollgCr': 15, 'Crawfor': 16, 'ClearCr': 17, 'Somerst': 18, 'Timber': 19, 'StoneBr': 20, 'NridgHt': 21, 'NoRidge': 22}\\n\\nCondition1 {'Artery': 0, 'Feedr': 1, 'Norm': 2, 'RRAn': 3, 'Rare': 4, 'PosN': 5}\\n\\nCondition2 {'Rare': 0, 'Norm': 1}\\n\\nBldgType {'2fmCon': 0, 'Duplex': 1, 'Twnhs': 2, '1Fam': 3, 'TwnhsE': 4}\\n\\nHouseStyle {'SFoyer': 0, '1.5Fin': 1, 'Rare': 2, '1Story': 3, 'SLvl': 4, '2Story': 5}\\n\\nRoofStyle {'Gable': 0, 'Rare': 1, 'Hip': 2}\\n\\nRoofMatl {'CompShg': 0, 'Rare': 1}\\n\\nExterior1st {'AsbShng': 0, 'Wd Sdng': 1, 'WdShing': 2, 'MetalSd': 3, 'Stucco': 4, 'Rare': 5, 'HdBoard': 6, 'Plywood': 7, 
'BrkFace': 8, 'CemntBd': 9, 'VinylSd': 10}\\n\\nExterior2nd {'AsbShng': 0, 'Wd Sdng': 1, 'MetalSd': 2, 'Wd Shng': 3, 'Stucco': 4, 'Rare': 5, 'HdBoard': 6, 'Plywood': 7, 'BrkFace': 8, 'CmentBd': 9, 'VinylSd': 10}\\n\\nMasVnrType {'Rare': 0, 'None': 1, 'BrkFace': 2, 'Stone': 3}\\n\\nFoundation {'Slab': 0, 'BrkTil': 1, 'CBlock': 2, 'Rare': 3, 'PConc': 4}\\n\\nHeating {'Rare': 0, 'GasW': 1, 'GasA': 2}\\n\\nCentralAir {'N': 0, 'Y': 1}\\n\\nElectrical {'Rare': 0, 'FuseF': 1, 'FuseA': 2, 'SBrkr': 3}\\n\\nFunctional {'Rare': 0, 'Min2': 1, 'Mod': 2, 'Min1': 3, 'Typ': 4}\\n\\nGarageType {'Rare': 0, 'Detchd': 1, 'Basment': 2, 'Attchd': 3, 'BuiltIn': 4}\\n\\nPavedDrive {'N': 0, 'P': 1, 'Y': 2}\\n\\nPoolQC {'Missing': 0, 'Rare': 1}\\n\\nMiscFeature {'Rare': 0, 'Shed': 1, 'Missing': 2}\\n\\nSaleType {'COD': 0, 'Rare': 1, 'WD': 2, 'New': 3}\\n\\nSaleCondition {'Rare': 0, 'Abnorml': 1, 'Family': 2, 'Normal': 3, 'Partial': 4}\\n\\nMSSubClass {30: 0, 'Rare': 1, 190: 2, 90: 3, 160: 4, 50: 5, 85: 6, 70: 7, 80: 8, 20: 9, 75: 10, 120: 11, 60: 12}\\n\\n\"\n ],\n [\n \"# check absence of na in the train set\\n[var for var in X_train.columns if X_train[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ],\n [\n \"# check absence of na in the test set\\n[var for var in X_test.columns if X_test[var].isnull().sum() > 0]\",\n \"_____no_output_____\"\n ],\n [\n \"# let me show you what I mean by monotonic relationship\\n# between labels and target\\n\\ndef analyse_vars(train, y_train, var):\\n \\n # function plots median house sale price per encoded\\n # category\\n \\n tmp = pd.concat([X_train, np.log(y_train)], axis=1)\\n \\n tmp.groupby(var)['SalePrice'].median().plot.bar()\\n plt.title(var)\\n plt.ylim(2.2, 2.6)\\n plt.ylabel('SalePrice')\\n plt.show()\\n \\nfor var in cat_others:\\n analyse_vars(X_train, y_train, var)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The monotonic relationship is particularly clear for the variables MSZoning and Neighborhood. Note how, the higher the integer that now represents the category, the higher the mean house sale price.\\n\\n(remember that the target is log-transformed, that is why the differences seem so small).\",\n \"_____no_output_____\"\n ],\n [\n \"## Feature Scaling\\n\\nFor use in linear models, features need to be either scaled. 
We will scale features to the minimum and maximum values:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# create scaler\\nscaler = MinMaxScaler()\\n\\n# fit the scaler to the train set\\nscaler.fit(X_train) \\n\\n# transform the train and test set\\n\\n# sklearn returns numpy arrays, so we wrap the\\n# array with a pandas dataframe\\n\\nX_train = pd.DataFrame(\\n scaler.transform(X_train),\\n columns=X_train.columns\\n)\\n\\nX_test = pd.DataFrame(\\n scaler.transform(X_test),\\n columns=X_train.columns\\n)\",\n \"_____no_output_____\"\n ],\n [\n \"X_train.head()\",\n \"_____no_output_____\"\n ],\n [\n \"# let's now save the train and test sets for the next notebook!\\n\\nX_train.to_csv('xtrain.csv', index=False)\\nX_test.to_csv('xtest.csv', index=False)\\n\\ny_train.to_csv('ytrain.csv', index=False)\\ny_test.to_csv('ytest.csv', index=False)\",\n \"_____no_output_____\"\n ],\n [\n \"# now let's save the scaler\\n\\njoblib.dump(scaler, 'minmax_scaler.joblib') \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"That concludes the feature engineering section.\\n\\n# Additional Resources\\n\\n- [Feature Engineering for Machine Learning](https://www.udemy.com/course/feature-engineering-for-machine-learning/?referralCode=A855148E05283015CF06) - Online Course\\n- [Packt Feature Engineering Cookbook](https://www.packtpub.com/data/python-feature-engineering-cookbook) - Book\\n- [Feature Engineering for Machine Learning: A comprehensive Overview](https://trainindata.medium.com/feature-engineering-for-machine-learning-a-comprehensive-overview-a7ad04c896f8) - Article\\n- [Practical Code Implementations of Feature Engineering for Machine Learning with Python](https://towardsdatascience.com/practical-code-implementations-of-feature-engineering-for-machine-learning-with-python-f13b953d4bcd) - Article\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code","code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code","code","code","code","code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code"],["markdown","markdown"],["code","code","code","code"],["markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n 
[\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459059,"cells":{"hexsha":{"kind":"string","value":"e7ef6b29046c1a956207e2f6b9dab5c36c509d13"},"size":{"kind":"number","value":52325,"string":"52,325"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"02_Filtering_&_Sorting/Fictional Army/Exercise_with_solutions.ipynb"},"max_stars_repo_name":{"kind":"string","value":"ViniciusRFerraz/pandas_exercises"},"max_stars_repo_head_hexsha":{"kind":"string","value":"14197b19f966a06f64b6811c42247e62a7160d58"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"02_Filtering_&_Sorting/Fictional Army/Exercise_with_solutions.ipynb"},"max_issues_repo_name":{"kind":"string","value":"ViniciusRFerraz/pandas_exercises"},"max_issues_repo_head_hexsha":{"kind":"string","value":"14197b19f966a06f64b6811c42247e62a7160d58"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"02_Filtering_&_Sorting/Fictional Army/Exercise_with_solutions.ipynb"},"max_forks_repo_name":{"kind":"string","value":"ViniciusRFerraz/pandas_exercises"},"max_forks_repo_head_hexsha":{"kind":"string","value":"14197b19f966a06f64b6811c42247e62a7160d58"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":29.3960674157,"string":"29.396067"},"max_line_length":{"kind":"number","value":253,"string":"253"},"alphanum_fraction":{"kind":"number","value":0.3078451983,"string":"0.307845"},"cells":{"kind":"list like","value":[[["# Fictional Army - Filtering and Sorting","_____no_output_____"],["### Introduction:\n\nThis exercise was inspired by this [page](http://chrisalbon.com/python/)\n\nSpecial thanks to: https://github.com/chrisalbon for sharing the dataset and materials.\n\n### Step 1. Import the necessary libraries","_____no_output_____"]],[["import pandas as pd","_____no_output_____"]],[["### Step 2. 
This is the data given as a dictionary","_____no_output_____"]],[["# Create an example dataframe about a fictional army\nraw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'],\n 'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'],\n 'deaths': [523, 52, 25, 616, 43, 234, 523, 62, 62, 73, 37, 35],\n 'battles': [5, 42, 2, 2, 4, 7, 8, 3, 4, 7, 8, 9],\n 'size': [1045, 957, 1099, 1400, 1592, 1006, 987, 849, 973, 1005, 1099, 1523],\n 'veterans': [1, 5, 62, 26, 73, 37, 949, 48, 48, 435, 63, 345],\n 'readiness': [1, 2, 3, 3, 2, 1, 2, 3, 2, 1, 2, 3],\n 'armored': [1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1],\n 'deserters': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3],\n 'origin': ['Arizona', 'California', 'Texas', 'Florida', 'Maine', 'Iowa', 'Alaska', 'Washington', 'Oregon', 'Wyoming', 'Louisana', 'Georgia']}","_____no_output_____"]],[["### Step 3. Create a dataframe and assign it to a variable called army. \n\n#### Don't forget to include the columns names in the order presented in the dictionary ('regiment', 'company', 'deaths'...) so that the column index order is consistent with the solutions. If omitted, pandas will order the columns alphabetically.","_____no_output_____"]],[["army = pd.DataFrame(raw_data, columns = ['regiment', 'company', 'deaths', 'battles', 'size', 'veterans', 'readiness', 'armored', 'deserters', 'origin'])","_____no_output_____"]],[["### Step 4. Set the 'origin' colum as the index of the dataframe","_____no_output_____"]],[["army = army.set_index('origin')\narmy","_____no_output_____"]],[["### Step 5. Print only the column veterans","_____no_output_____"]],[["army['veterans']","_____no_output_____"]],[["### Step 6. Print the columns 'veterans' and 'deaths'","_____no_output_____"]],[["army[['veterans', 'deaths']]","_____no_output_____"]],[["### Step 7. Print the name of all the columns.","_____no_output_____"]],[["army.columns","_____no_output_____"]],[["### Step 8. Select the 'deaths', 'size' and 'deserters' columns from Maine and Alaska","_____no_output_____"]],[["# Select all rows with the index label \"Maine\" and \"Alaska\"\narmy.loc[['Maine','Alaska'] , [\"deaths\",\"size\",\"deserters\"]]","_____no_output_____"]],[["### Step 9. Select the rows 3 to 7 and the columns 3 to 6","_____no_output_____"]],[["#\narmy.iloc[3:7, 3:6]","_____no_output_____"]],[["### Step 10. Select every row after the fourth row","_____no_output_____"]],[["army.iloc[3:]","_____no_output_____"]],[["### Step 11. Select every row up to the 4th row","_____no_output_____"]],[["army.iloc[:3]","_____no_output_____"]],[["### Step 12. Select the 3rd column up to the 7th column","_____no_output_____"]],[["# the first : means all\n# after the comma you select the range\n\narmy.iloc[: , 4:7]","_____no_output_____"]],[["### Step 13. Select rows where df.deaths is greater than 50","_____no_output_____"]],[["army[army['deaths'] > 50]","_____no_output_____"]],[["### Step 14. Select rows where df.deaths is greater than 500 or less than 50","_____no_output_____"]],[["army[(army['deaths'] > 500) | (army['deaths'] < 50)]","_____no_output_____"]],[["### Step 15. Select all the regiments not named \"Dragoons\"","_____no_output_____"]],[["army[(army['regiment'] != 'Dragoons')]","_____no_output_____"]],[["### Step 16. Select the rows called Texas and Arizona","_____no_output_____"]],[["army.loc[['Arizona', 'Texas']]","_____no_output_____"]],[["### Step 17. 
Select the third cell in the row named Arizona","_____no_output_____"]],[["army.loc[['Arizona'], ['deaths']]\n\n#OR\n\narmy.iloc[[0], army.columns.get_loc('deaths')]","_____no_output_____"]],[["### Step 18. Select the third cell down in the column named deaths","_____no_output_____"]],[["army.loc['Texas', 'deaths']\n\n#OR\n\narmy.iloc[[2], army.columns.get_loc('deaths')]\n","_____no_output_____"]]],"string":"[\n [\n [\n \"# Fictional Army - Filtering and Sorting\",\n \"_____no_output_____\"\n ],\n [\n \"### Introduction:\\n\\nThis exercise was inspired by this [page](http://chrisalbon.com/python/)\\n\\nSpecial thanks to: https://github.com/chrisalbon for sharing the dataset and materials.\\n\\n### Step 1. Import the necessary libraries\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 2. This is the data given as a dictionary\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create an example dataframe about a fictional army\\nraw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'],\\n 'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'],\\n 'deaths': [523, 52, 25, 616, 43, 234, 523, 62, 62, 73, 37, 35],\\n 'battles': [5, 42, 2, 2, 4, 7, 8, 3, 4, 7, 8, 9],\\n 'size': [1045, 957, 1099, 1400, 1592, 1006, 987, 849, 973, 1005, 1099, 1523],\\n 'veterans': [1, 5, 62, 26, 73, 37, 949, 48, 48, 435, 63, 345],\\n 'readiness': [1, 2, 3, 3, 2, 1, 2, 3, 2, 1, 2, 3],\\n 'armored': [1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1],\\n 'deserters': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3],\\n 'origin': ['Arizona', 'California', 'Texas', 'Florida', 'Maine', 'Iowa', 'Alaska', 'Washington', 'Oregon', 'Wyoming', 'Louisana', 'Georgia']}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 3. Create a dataframe and assign it to a variable called army. \\n\\n#### Don't forget to include the columns names in the order presented in the dictionary ('regiment', 'company', 'deaths'...) so that the column index order is consistent with the solutions. If omitted, pandas will order the columns alphabetically.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army = pd.DataFrame(raw_data, columns = ['regiment', 'company', 'deaths', 'battles', 'size', 'veterans', 'readiness', 'armored', 'deserters', 'origin'])\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 4. Set the 'origin' colum as the index of the dataframe\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army = army.set_index('origin')\\narmy\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 5. Print only the column veterans\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army['veterans']\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 6. Print the columns 'veterans' and 'deaths'\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army[['veterans', 'deaths']]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 7. Print the name of all the columns.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army.columns\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 8. Select the 'deaths', 'size' and 'deserters' columns from Maine and Alaska\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Select all rows with the index label \\\"Maine\\\" and \\\"Alaska\\\"\\narmy.loc[['Maine','Alaska'] , [\\\"deaths\\\",\\\"size\\\",\\\"deserters\\\"]]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 9. 
Select the rows 3 to 7 and the columns 3 to 6\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#\\narmy.iloc[3:7, 3:6]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 10. Select every row after the fourth row\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army.iloc[3:]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 11. Select every row up to the 4th row\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army.iloc[:3]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 12. Select the 3rd column up to the 7th column\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# the first : means all\\n# after the comma you select the range\\n\\narmy.iloc[: , 4:7]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 13. Select rows where df.deaths is greater than 50\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army[army['deaths'] > 50]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 14. Select rows where df.deaths is greater than 500 or less than 50\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army[(army['deaths'] > 500) | (army['deaths'] < 50)]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 15. Select all the regiments not named \\\"Dragoons\\\"\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army[(army['regiment'] != 'Dragoons')]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 16. Select the rows called Texas and Arizona\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army.loc[['Arizona', 'Texas']]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 17. Select the third cell in the row named Arizona\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army.loc[['Arizona'], ['deaths']]\\n\\n#OR\\n\\narmy.iloc[[0], army.columns.get_loc('deaths')]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Step 18. 
Select the third cell down in the column named deaths\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"army.loc['Texas', 'deaths']\\n\\n#OR\\n\\narmy.iloc[[2], army.columns.get_loc('deaths')]\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459060,"cells":{"hexsha":{"kind":"string","value":"e7ef6b684d8f5d38c49d79b55b65f32c1db4e837"},"size":{"kind":"number","value":2852,"string":"2,852"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"examples/reference/elements/matplotlib/Spread.ipynb"},"max_stars_repo_name":{"kind":"string","value":"stonebig/holoviews"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d5270c30dd1af38a785452aeac2fbabbe528e892"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-08-13T00:11:46.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-01-31T22:13:21.000Z"},"max_issues_repo_path":{"kind":"string","value":"examples/reference/elements/matplotlib/Spread.ipynb"},"max_issues_repo_name":{"kind":"string","value":"stonebig/holoviews"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d5270c30dd1af38a785452aeac2fbabbe528e892"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"examples/reference/elements/matplotlib/Spread.ipynb"},"max_forks_repo_name":{"kind":"string","value":"stonebig/holoviews"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d5270c30dd1af38a785452aeac2fbabbe528e892"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":30.6666666667,"string":"30.666667"},"max_line_length":{"kind":"number","value":518,"string":"518"},"alphanum_fraction":{"kind":"number","value":0.5880084151,"string":"0.588008"},"cells":{"kind":"list like","value":[[["

\n
\n
Title
Spread Element
\n
Dependencies
Matplotlib
\n
Backends
Matplotlib
Bokeh
\n
\n
","_____no_output_____"]],[["import numpy as np\nimport holoviews as hv\nhv.extension('matplotlib')","_____no_output_____"]],[["``Spread`` elements have the same data format as the [``ErrorBars``](ErrorBars.ipynb) element, namely x- and y-values with associated symmetric or asymmetric errors, but are interpreted as samples from a continuous distribution (just as ``Curve`` is the continuous version of ``Scatter``). These are often paired with an overlaid ``Curve`` to show an average trend along with a corresponding spread of values; see the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for examples.\n\nNote that as the ``Spread`` element is used to add information to a plot (typically a ``Curve``) the default alpha value is less that one, making it partially transparent. \n\n##### Symmetric","_____no_output_____"],["Given two value dimensions corresponding to the position on the y-axis and the error, ``Spread`` will visualize itself assuming symmetric errors:","_____no_output_____"]],[["np.random.seed(42)\nxs = np.linspace(0, np.pi*2, 20)\nerr = 0.2+np.random.rand(len(xs))\nhv.Spread((xs, np.sin(xs), err))","_____no_output_____"]],[["##### Asymmetric","_____no_output_____"],["Given three value dimensions corresponding to the position on the y-axis, the negative error and the positive error, ``Spread`` can be used to visualize assymmetric errors:","_____no_output_____"]],[["%%opts Spread (facecolor='indianred' alpha=1)\nxs = np.linspace(0, np.pi*2, 20)\nhv.Spread((xs, np.sin(xs), 0.1+np.random.rand(len(xs)), 0.1+np.random.rand(len(xs))),\n vdims=['y', 'yerrneg', 'yerrpos'])","_____no_output_____"]]],"string":"[\n [\n [\n \"
\\n
\\n
Title
Spread Element
\\n
Dependencies
Matplotlib
\\n
Backends
Matplotlib
Bokeh
\\n
\\n
\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\nimport holoviews as hv\\nhv.extension('matplotlib')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"``Spread`` elements have the same data format as the [``ErrorBars``](ErrorBars.ipynb) element, namely x- and y-values with associated symmetric or asymmetric errors, but are interpreted as samples from a continuous distribution (just as ``Curve`` is the continuous version of ``Scatter``). These are often paired with an overlaid ``Curve`` to show an average trend along with a corresponding spread of values; see the [Tabular Datasets](../../../user_guide/07-Tabular_Datasets.ipynb) user guide for examples.\\n\\nNote that as the ``Spread`` element is used to add information to a plot (typically a ``Curve``) the default alpha value is less that one, making it partially transparent. \\n\\n##### Symmetric\",\n \"_____no_output_____\"\n ],\n [\n \"Given two value dimensions corresponding to the position on the y-axis and the error, ``Spread`` will visualize itself assuming symmetric errors:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"np.random.seed(42)\\nxs = np.linspace(0, np.pi*2, 20)\\nerr = 0.2+np.random.rand(len(xs))\\nhv.Spread((xs, np.sin(xs), err))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"##### Asymmetric\",\n \"_____no_output_____\"\n ],\n [\n \"Given three value dimensions corresponding to the position on the y-axis, the negative error and the positive error, ``Spread`` can be used to visualize assymmetric errors:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%%opts Spread (facecolor='indianred' alpha=1)\\nxs = np.linspace(0, np.pi*2, 20)\\nhv.Spread((xs, np.sin(xs), 0.1+np.random.rand(len(xs)), 0.1+np.random.rand(len(xs))),\\n vdims=['y', 'yerrneg', 'yerrpos'])\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown","markdown"],["code"],["markdown","markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459061,"cells":{"hexsha":{"kind":"string","value":"e7ef8a2bbf2b148a04e5aa43fc75b1422f21af67"},"size":{"kind":"number","value":9183,"string":"9,183"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Chapter10/ex9.ipynb"},"max_stars_repo_name":{"kind":"string","value":"m0baxter/MLBookStuff"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2f2ba275b3fe59b69c4e4f20f355f019e7bd1eac"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Chapter10/ex9.ipynb"},"max_issues_repo_name":{"kind":"string","value":"m0baxter/MLBookStuff"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2f2ba275b3fe59b69c4e4f20f355f019e7bd1eac"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Chapter10/ex9.ipynb"},"max_forks_repo_name":{"kind":"string","value":"m0baxter/MLBookStuff"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2f2ba275b3fe59b69c4e4f20f355f019e7bd1eac"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":34.0111111111,"string":"34.011111"},"max_line_length":{"kind":"number","value":124,"string":"124"},"alphanum_fraction":{"kind":"number","value":0.483066536,"string":"0.483067"},"cells":{"kind":"list like","value":[[["import numpy as np\nimport tensorflow as tf\n\nfrom datetime import datetime","_____no_output_____"],["mnist = input_data.read_data_sets(\"/tmp/data\")\n\n_, n = mnist.train.images.shape","Extracting /tmp/data/train-images-idx3-ubyte.gz\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"],["now = datetime.utcnow().strftime(\"%Y%m%d%H%M%S\")\nrootLogDir = \"tfLogs\"\nlogDir = \"{0}/run-{1}/\".format(rootLogDir, now)\n\nfileWriter = tf.summary.FileWriter( logDir, tf.get_default_graph() )","_____no_output_____"],["def mnistClassifier( X, y, nOut, nl = 1, nh = 100, alpha = 0.01, momentum = 0.9 ):\n\n if ( nl < 1 ):\n print( \"You need at least one hidden layer.\" )\n return\n\n if ( nh < 1 ):\n print( \"you need at least one neuron.\" )\n return\n\n with tf.name_scope( \"dnn\" ):\n layers = [ tf.layers.dense( X, nh, name = \"hidden1\", activation = tf.nn.relu ) ]\n\n for i in range(2, nl + 1):\n layers.append( tf.layers.dense( layers[-1], nh, name = \"hidden\" + str(i), activation = tf.nn.relu ) )\n\n logits = tf.layers.dense( layers[-1], nOut, name = \"output\" )\n\n with tf.name_scope(\"loss\"):\n crossEnt = tf.nn.sparse_softmax_cross_entropy_with_logits( labels = y, logits = logits)\n loss = tf.reduce_mean( crossEnt, name = \"loss\" )\n \n with tf.name_scope(\"eval\"):\n correct = tf.nn.in_top_k(logits, y, 1)\n accuracy = tf.reduce_mean( tf.cast(correct, tf.float32) )\n \n with tf.name_scope(\"train\"):\n opt = tf.train.MomentumOptimizer( learning_rate = alpha, momentum = momentum)\n training = opt.minimize( loss )\n lossSummary = tf.summary.scalar(\"crossEntropy\", loss)\n \n with tf.name_scope(\"utility\"):\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n \n return loss, training, accuracy, lossSummary, init, saver\n","_____no_output_____"],["X = tf.placeholder(tf.float32, shape = (None, n), name = \"X\")\ny = tf.placeholder(tf.int32, shape = (None), name = \"y\")\n\nloss, training, accuracy, lossSummary, init, saver = mnistClassifier( X, y, 10,\n nl = 4,\n nh = 200,\n alpha = 0.01,\n momentum = 0.9 )","_____no_output_____"],["nEpochs = 1000\nbatchSize = 64 #2048\n\nhiVal = 0\npatience = 0\n\nwith tf.Session() as sess:\n\n init.run()\n \n for epoch in range(nEpochs):\n for i in range( mnist.train.num_examples // batchSize ):\n \n batchX ,batchY = mnist.train.next_batch( batchSize )\n sess.run( training, feed_dict = { X : batchX, y : batchY } )\n \n trainAcc = accuracy.eval( feed_dict = { X : batchX, y : batchY } )\n valAcc = accuracy.eval( feed_dict = { X : mnist.validation.images,\n 
y : mnist.validation.labels } )\n\n print( epoch, \"Training:\", trainAcc, \"Validation:\", valAcc )\n\n if ( valAcc > hiVal ):\n hiVal = valAcc\n patience = 0\n\n else:\n patience += 1\n\n if ( patience >= 10):\n print(\"No imporvement on validation set after {0} epochs. Training competed\".format(patience))\n break\n \n print(\"saving model.\")\n saver.save(sess, \"./model.ckpt\")\n","0 Training: 0.96875 Validation: 0.957\n1 Training: 1.0 Validation: 0.9632\n2 Training: 0.984375 Validation: 0.9734\n3 Training: 1.0 Validation: 0.976\n4 Training: 1.0 Validation: 0.9784\n5 Training: 0.984375 Validation: 0.9784\n6 Training: 1.0 Validation: 0.9804\n7 Training: 0.96875 Validation: 0.9806\n8 Training: 1.0 Validation: 0.9826\n9 Training: 1.0 Validation: 0.9796\n10 Training: 0.984375 Validation: 0.9774\n11 Training: 0.984375 Validation: 0.9836\n12 Training: 1.0 Validation: 0.979\n13 Training: 1.0 Validation: 0.9786\n14 Training: 1.0 Validation: 0.9812\n15 Training: 0.984375 Validation: 0.9778\n16 Training: 1.0 Validation: 0.981\n17 Training: 1.0 Validation: 0.9828\n18 Training: 1.0 Validation: 0.9848\n19 Training: 1.0 Validation: 0.9842\n20 Training: 1.0 Validation: 0.985\n21 Training: 1.0 Validation: 0.9846\n22 Training: 1.0 Validation: 0.9848\n23 Training: 1.0 Validation: 0.9848\n24 Training: 1.0 Validation: 0.9846\n25 Training: 1.0 Validation: 0.985\n26 Training: 1.0 Validation: 0.985\n27 Training: 1.0 Validation: 0.9848\n28 Training: 1.0 Validation: 0.985\n29 Training: 1.0 Validation: 0.985\n30 Training: 1.0 Validation: 0.9852\n31 Training: 1.0 Validation: 0.9852\n32 Training: 1.0 Validation: 0.9854\n33 Training: 1.0 Validation: 0.9852\n34 Training: 1.0 Validation: 0.9854\n35 Training: 1.0 Validation: 0.9852\n36 Training: 1.0 Validation: 0.9854\n37 Training: 1.0 Validation: 0.9854\n38 Training: 1.0 Validation: 0.9852\n39 Training: 1.0 Validation: 0.9854\n40 Training: 1.0 Validation: 0.9854\n41 Training: 1.0 Validation: 0.9852\n42 Training: 1.0 Validation: 0.9848\nNo imporvement on validation set after 10 epochs. 
Training competed\nsaving model.\n"],["tf.reset_default_graph()\n#sess = tf.Session()\n\nX = tf.placeholder(tf.float32, shape = (None, n), name = \"X\")\ny = tf.placeholder(tf.int32, shape = (None), name = \"y\")\n\nloss, training, accuracy, lossSummary, init, saver = mnistClassifier( X, y, 10,\n nl = 4,\n nh = 200,\n alpha = 0.01,\n momentum = 0.9 )\n\nwith tf.Session() as sess:\n\n saver.restore( sess, \"./model.ckpt\" )\n testAcc = accuracy.eval( feed_dict = { X : mnist.test.images, y : mnist.test.labels })\n\n print( \"Accuracy on test set:\", testAcc )","INFO:tensorflow:Restoring parameters from ./model.ckpt\nAccuracy on test set: 0.9823\n"]]],"string":"[\n [\n [\n \"import numpy as np\\nimport tensorflow as tf\\n\\nfrom datetime import datetime\",\n \"_____no_output_____\"\n ],\n [\n \"mnist = input_data.read_data_sets(\\\"/tmp/data\\\")\\n\\n_, n = mnist.train.images.shape\",\n \"Extracting /tmp/data/train-images-idx3-ubyte.gz\\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\\n\"\n ],\n [\n \"now = datetime.utcnow().strftime(\\\"%Y%m%d%H%M%S\\\")\\nrootLogDir = \\\"tfLogs\\\"\\nlogDir = \\\"{0}/run-{1}/\\\".format(rootLogDir, now)\\n\\nfileWriter = tf.summary.FileWriter( logDir, tf.get_default_graph() )\",\n \"_____no_output_____\"\n ],\n [\n \"def mnistClassifier( X, y, nOut, nl = 1, nh = 100, alpha = 0.01, momentum = 0.9 ):\\n\\n if ( nl < 1 ):\\n print( \\\"You need at least one hidden layer.\\\" )\\n return\\n\\n if ( nh < 1 ):\\n print( \\\"you need at least one neuron.\\\" )\\n return\\n\\n with tf.name_scope( \\\"dnn\\\" ):\\n layers = [ tf.layers.dense( X, nh, name = \\\"hidden1\\\", activation = tf.nn.relu ) ]\\n\\n for i in range(2, nl + 1):\\n layers.append( tf.layers.dense( layers[-1], nh, name = \\\"hidden\\\" + str(i), activation = tf.nn.relu ) )\\n\\n logits = tf.layers.dense( layers[-1], nOut, name = \\\"output\\\" )\\n\\n with tf.name_scope(\\\"loss\\\"):\\n crossEnt = tf.nn.sparse_softmax_cross_entropy_with_logits( labels = y, logits = logits)\\n loss = tf.reduce_mean( crossEnt, name = \\\"loss\\\" )\\n \\n with tf.name_scope(\\\"eval\\\"):\\n correct = tf.nn.in_top_k(logits, y, 1)\\n accuracy = tf.reduce_mean( tf.cast(correct, tf.float32) )\\n \\n with tf.name_scope(\\\"train\\\"):\\n opt = tf.train.MomentumOptimizer( learning_rate = alpha, momentum = momentum)\\n training = opt.minimize( loss )\\n lossSummary = tf.summary.scalar(\\\"crossEntropy\\\", loss)\\n \\n with tf.name_scope(\\\"utility\\\"):\\n init = tf.global_variables_initializer()\\n saver = tf.train.Saver()\\n \\n return loss, training, accuracy, lossSummary, init, saver\\n\",\n \"_____no_output_____\"\n ],\n [\n \"X = tf.placeholder(tf.float32, shape = (None, n), name = \\\"X\\\")\\ny = tf.placeholder(tf.int32, shape = (None), name = \\\"y\\\")\\n\\nloss, training, accuracy, lossSummary, init, saver = mnistClassifier( X, y, 10,\\n nl = 4,\\n nh = 200,\\n alpha = 0.01,\\n momentum = 0.9 )\",\n \"_____no_output_____\"\n ],\n [\n \"nEpochs = 1000\\nbatchSize = 64 #2048\\n\\nhiVal = 0\\npatience = 0\\n\\nwith tf.Session() as sess:\\n\\n init.run()\\n \\n for epoch in range(nEpochs):\\n for i in range( mnist.train.num_examples // batchSize ):\\n \\n batchX ,batchY = mnist.train.next_batch( batchSize )\\n sess.run( training, feed_dict = { X : batchX, y : batchY } )\\n \\n trainAcc = accuracy.eval( feed_dict = { X : batchX, y : batchY } )\\n valAcc = accuracy.eval( feed_dict = { X : 
mnist.validation.images,\\n y : mnist.validation.labels } )\\n\\n print( epoch, \\\"Training:\\\", trainAcc, \\\"Validation:\\\", valAcc )\\n\\n if ( valAcc > hiVal ):\\n hiVal = valAcc\\n patience = 0\\n\\n else:\\n patience += 1\\n\\n if ( patience >= 10):\\n print(\\\"No imporvement on validation set after {0} epochs. Training competed\\\".format(patience))\\n break\\n \\n print(\\\"saving model.\\\")\\n saver.save(sess, \\\"./model.ckpt\\\")\\n\",\n \"0 Training: 0.96875 Validation: 0.957\\n1 Training: 1.0 Validation: 0.9632\\n2 Training: 0.984375 Validation: 0.9734\\n3 Training: 1.0 Validation: 0.976\\n4 Training: 1.0 Validation: 0.9784\\n5 Training: 0.984375 Validation: 0.9784\\n6 Training: 1.0 Validation: 0.9804\\n7 Training: 0.96875 Validation: 0.9806\\n8 Training: 1.0 Validation: 0.9826\\n9 Training: 1.0 Validation: 0.9796\\n10 Training: 0.984375 Validation: 0.9774\\n11 Training: 0.984375 Validation: 0.9836\\n12 Training: 1.0 Validation: 0.979\\n13 Training: 1.0 Validation: 0.9786\\n14 Training: 1.0 Validation: 0.9812\\n15 Training: 0.984375 Validation: 0.9778\\n16 Training: 1.0 Validation: 0.981\\n17 Training: 1.0 Validation: 0.9828\\n18 Training: 1.0 Validation: 0.9848\\n19 Training: 1.0 Validation: 0.9842\\n20 Training: 1.0 Validation: 0.985\\n21 Training: 1.0 Validation: 0.9846\\n22 Training: 1.0 Validation: 0.9848\\n23 Training: 1.0 Validation: 0.9848\\n24 Training: 1.0 Validation: 0.9846\\n25 Training: 1.0 Validation: 0.985\\n26 Training: 1.0 Validation: 0.985\\n27 Training: 1.0 Validation: 0.9848\\n28 Training: 1.0 Validation: 0.985\\n29 Training: 1.0 Validation: 0.985\\n30 Training: 1.0 Validation: 0.9852\\n31 Training: 1.0 Validation: 0.9852\\n32 Training: 1.0 Validation: 0.9854\\n33 Training: 1.0 Validation: 0.9852\\n34 Training: 1.0 Validation: 0.9854\\n35 Training: 1.0 Validation: 0.9852\\n36 Training: 1.0 Validation: 0.9854\\n37 Training: 1.0 Validation: 0.9854\\n38 Training: 1.0 Validation: 0.9852\\n39 Training: 1.0 Validation: 0.9854\\n40 Training: 1.0 Validation: 0.9854\\n41 Training: 1.0 Validation: 0.9852\\n42 Training: 1.0 Validation: 0.9848\\nNo imporvement on validation set after 10 epochs. Training competed\\nsaving model.\\n\"\n ],\n [\n \"tf.reset_default_graph()\\n#sess = tf.Session()\\n\\nX = tf.placeholder(tf.float32, shape = (None, n), name = \\\"X\\\")\\ny = tf.placeholder(tf.int32, shape = (None), name = \\\"y\\\")\\n\\nloss, training, accuracy, lossSummary, init, saver = mnistClassifier( X, y, 10,\\n nl = 4,\\n nh = 200,\\n alpha = 0.01,\\n momentum = 0.9 )\\n\\nwith tf.Session() as sess:\\n\\n saver.restore( sess, \\\"./model.ckpt\\\" )\\n testAcc = accuracy.eval( feed_dict = { X : mnist.test.images, y : mnist.test.labels })\\n\\n print( \\\"Accuracy on test set:\\\", testAcc )\",\n \"INFO:tensorflow:Restoring parameters from ./model.ckpt\\nAccuracy on test set: 0.9823\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459062,"cells":{"hexsha":{"kind":"string","value":"e7ef8f85087675e33538ec18e5d3ad7c09600723"},"size":{"kind":"number","value":99641,"string":"99,641"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"introductory-tutorials/intro-to-julia/10. 
Multiple dispatch.ipynb"},"max_stars_repo_name":{"kind":"string","value":"ljbelenky/JuliaTutorials"},"max_stars_repo_head_hexsha":{"kind":"string","value":"de4a74717e2debebfbddd815848da5292c1755e5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"introductory-tutorials/intro-to-julia/10. Multiple dispatch.ipynb"},"max_issues_repo_name":{"kind":"string","value":"ljbelenky/JuliaTutorials"},"max_issues_repo_head_hexsha":{"kind":"string","value":"de4a74717e2debebfbddd815848da5292c1755e5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"introductory-tutorials/intro-to-julia/10. Multiple dispatch.ipynb"},"max_forks_repo_name":{"kind":"string","value":"ljbelenky/JuliaTutorials"},"max_forks_repo_head_hexsha":{"kind":"string","value":"de4a74717e2debebfbddd815848da5292c1755e5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":118.6202380952,"string":"118.620238"},"max_line_length":{"kind":"number","value":54936,"string":"54,936"},"alphanum_fraction":{"kind":"number","value":0.684667958,"string":"0.684668"},"cells":{"kind":"list like","value":[[["empty"]]],"string":"[\n [\n [\n \"empty\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["empty"],"string":"[\n \"empty\"\n]"},"cell_type_groups":{"kind":"list like","value":[["empty"]],"string":"[\n [\n \"empty\"\n ]\n]"}}},{"rowIdx":1459063,"cells":{"hexsha":{"kind":"string","value":"e7efb26f8b4ddf5e95b05144022bf9e0f6284c8d"},"size":{"kind":"number","value":13421,"string":"13,421"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Bengali-AI/notebooks/Views.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Nandhagopalan/Struturing_Projects"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0684eae86a62936a65615c34f901433251949696"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Bengali-AI/notebooks/Views.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Nandhagopalan/Struturing_Projects"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0684eae86a62936a65615c34f901433251949696"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Bengali-AI/notebooks/Views.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Nandhagopalan/Struturing_Projects"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0684eae86a62936a65615c34f901433251949696"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":63.9095238095,"string":"63.909524"},"max_line_length":{"kind":"number","value":9504,"string":"9,504"},"alphanum_fraction":{"kind":"number","value":0.8292973698,"string":"0.829297"},"cells":{"kind":"list like","value":[[["import pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np","_____no_output_____"],["sys.path.append('../src/')","_____no_output_____"],["from dataset import BengaliAiDataset","_____no_output_____"],["ds=BengaliAiDataset(folds=[0,1],img_height=137,img_width=236,mean=(0.485,0.456,0.406),\n std=(0.229,0.224,0.225))","_____no_output_____"],["len(ds)","_____no_output_____"],["ix=123\n\nimg=ds[ix]['image']\ngrap_root=ds[ix]['grapheme_root']\nvowel=ds[ix]['vowel_diacritic']\nconsonant=ds[ix]['consonant_diacritic']\n\nplt.imshow(np.transpose(img,[1,2,0]))","Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n"],["#import pretrainedmodels","_____no_output_____"],["check=pretrainedmodels.__dict__['resnet34'](pretrained='imagenet')\nprint(\"check the architecture and change the last linear layer\")","_____no_output_____"]]],"string":"[\n [\n [\n \"import pandas as pd\\nimport matplotlib.pyplot as plt\\nimport sys\\nimport numpy as np\",\n \"_____no_output_____\"\n ],\n [\n \"sys.path.append('../src/')\",\n \"_____no_output_____\"\n ],\n [\n \"from dataset import BengaliAiDataset\",\n \"_____no_output_____\"\n ],\n [\n \"ds=BengaliAiDataset(folds=[0,1],img_height=137,img_width=236,mean=(0.485,0.456,0.406),\\n std=(0.229,0.224,0.225))\",\n \"_____no_output_____\"\n ],\n [\n \"len(ds)\",\n \"_____no_output_____\"\n ],\n [\n \"ix=123\\n\\nimg=ds[ix]['image']\\ngrap_root=ds[ix]['grapheme_root']\\nvowel=ds[ix]['vowel_diacritic']\\nconsonant=ds[ix]['consonant_diacritic']\\n\\nplt.imshow(np.transpose(img,[1,2,0]))\",\n \"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\\n\"\n ],\n [\n \"#import pretrainedmodels\",\n \"_____no_output_____\"\n ],\n [\n \"check=pretrainedmodels.__dict__['resnet34'](pretrained='imagenet')\\nprint(\\\"check the architecture and change the last linear layer\\\")\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459064,"cells":{"hexsha":{"kind":"string","value":"e7efc28a0317348b5edf5212602a5d88ef700afd"},"size":{"kind":"number","value":1288,"string":"1,288"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter 
Notebook"},"max_stars_repo_path":{"kind":"string","value":"my_experiments/VES.ipynb"},"max_stars_repo_name":{"kind":"string","value":"BloonCorps/IAP2022"},"max_stars_repo_head_hexsha":{"kind":"string","value":"11a481790878defad0d2974b81ae109168306077"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"my_experiments/VES.ipynb"},"max_issues_repo_name":{"kind":"string","value":"BloonCorps/IAP2022"},"max_issues_repo_head_hexsha":{"kind":"string","value":"11a481790878defad0d2974b81ae109168306077"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"my_experiments/VES.ipynb"},"max_forks_repo_name":{"kind":"string","value":"BloonCorps/IAP2022"},"max_forks_repo_head_hexsha":{"kind":"string","value":"11a481790878defad0d2974b81ae109168306077"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":28,"string":"28"},"max_line_length":{"kind":"number","value":194,"string":"194"},"alphanum_fraction":{"kind":"number","value":0.5326086957,"string":"0.532609"},"cells":{"kind":"list like","value":[[["# Solving Statistical Mechanics Using Variational Autoregressive Networks","_____no_output_____"],["Suppose we have some distribution:\n\n$$ p(x)=\\frac{e^{-\\beta E(x)}}{Z} $$\n\nWith the absolute free energy being:\n \n$$F = \\frac{1}{\\beta} \\ln(Z)$$\n\nNow suppose we have a distribution, $q_{\\theta}(x)$, whose parameters $\\theta$ we can optimize such that $q_{\\theta}(x)$ matches the target distribution $p(x)$ as close as possible. \n\n$D_{KL}(q_{\\theta} \\| p)$ can be used as an optimizable function.\n\nHowever, what is valuable is that minimizing $D_{KL}(q_{\\theta} \\| p)$ does not require samples from $p(x)$, or even calculating $p(x)$; notice that:\n\n$D_{KL}(q_{\\theta} \\| p) = \\sum q_{\\theta} \\ln (\\frac{q_{\\theta}(x)}{p(x)}) = \\beta(F_q - F)$ and $F_q = \\frac{1}{\\beta} \\sum q_{\\theta}\n","_____no_output_____"]]],"string":"[\n [\n [\n \"# Solving Statistical Mechanics Using Variational Autoregressive Networks\",\n \"_____no_output_____\"\n ],\n [\n \"Suppose we have some distribution:\\n\\n$$ p(x)=\\\\frac{e^{-\\\\beta E(x)}}{Z} $$\\n\\nWith the absolute free energy being:\\n \\n$$F = \\\\frac{1}{\\\\beta} \\\\ln(Z)$$\\n\\nNow suppose we have a distribution, $q_{\\\\theta}(x)$, whose parameters $\\\\theta$ we can optimize such that $q_{\\\\theta}(x)$ matches the target distribution $p(x)$ as close as possible. 
\\n\\n$D_{KL}(q_{\\\\theta} \\\\| p)$ can be used as an optimizable function.\\n\\nHowever, what is valuable is that minimizing $D_{KL}(q_{\\\\theta} \\\\| p)$ does not require samples from $p(x)$, or even calculating $p(x)$; notice that:\\n\\n$D_{KL}(q_{\\\\theta} \\\\| p) = \\\\sum q_{\\\\theta} \\\\ln (\\\\frac{q_{\\\\theta}(x)}{p(x)}) = \\\\beta(F_q - F)$ and $F_q = \\\\frac{1}{\\\\beta} \\\\sum q_{\\\\theta}\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown"],"string":"[\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ]\n]"}}},{"rowIdx":1459065,"cells":{"hexsha":{"kind":"string","value":"e7efea88440a7f1b0b9e495a6bfb90bfa398395d"},"size":{"kind":"number","value":32190,"string":"32,190"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"ai-platform-unified/notebooks/unofficial/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb"},"max_stars_repo_name":{"kind":"string","value":"thepycoder/ai-platform-samples"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f055b39f77df7b4fe0467c845f1ffff2b68bed3f"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-07-01T16:40:16.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-07-01T16:40:16.000Z"},"max_issues_repo_path":{"kind":"string","value":"ai-platform-unified/notebooks/unofficial/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb"},"max_issues_repo_name":{"kind":"string","value":"amygdala/ai-platform-samples"},"max_issues_repo_head_hexsha":{"kind":"string","value":"62ec18dc30f29eb6bdcfefe229d76e5fab18584d"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ai-platform-unified/notebooks/unofficial/ml_metadata/sdk-metric-parameter-tracking-for-custom-jobs.ipynb"},"max_forks_repo_name":{"kind":"string","value":"amygdala/ai-platform-samples"},"max_forks_repo_head_hexsha":{"kind":"string","value":"62ec18dc30f29eb6bdcfefe229d76e5fab18584d"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":30.9817131858,"string":"30.981713"},"max_line_length":{"kind":"number","value":282,"string":"282"},"alphanum_fraction":{"kind":"number","value":0.5138863001,"string":"0.513886"},"cells":{"kind":"list like","value":[[["# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.","_____no_output_____"]],[["\n\n \n \n
\n \n \"Colab Run in Colab\n \n \n \n \"GitHub\n View on GitHub\n \n
","_____no_output_____"],["#Vertex AI: Track parameters and metrics for custom training jobs","_____no_output_____"],["## Overview\n\nThis notebook demonstrates how to track metrics and parameters for Vertex AI custom training jobs, and how to perform detailed analysis using this data.\n\n### Dataset\n\nThis example uses the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone\n### Objective\n\nIn this notebook, you will learn how to use Vertex SDK for Python to:\n\n * Track training parameters and prediction metrics for a custom training job.\n * Extract and perform analysis for all parameters and metrics within an Experiment.\n\n### Costs \n\n\nThis tutorial uses billable components of Google Cloud:\n\n* Vertex AI\n* Cloud Storage\n\nLearn about [Vertex AI\npricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage\npricing](https://cloud.google.com/storage/pricing), and use the [Pricing\nCalculator](https://cloud.google.com/products/calculator/)\nto generate a cost estimate based on your projected usage.","_____no_output_____"],["### Set up your local development environment\n\n**If you are using Colab or Google Cloud Notebooks**, your environment already meets\nall the requirements to run this notebook. You can skip this step.","_____no_output_____"],["**Otherwise**, make sure your environment meets this notebook's requirements.\nYou need the following:\n\n* The Google Cloud SDK\n* Git\n* Python 3\n* virtualenv\n* Jupyter notebook running in a virtual environment with Python 3\n\nThe Google Cloud guide to [Setting up a Python development\nenvironment](https://cloud.google.com/python/setup) and the [Jupyter\ninstallation guide](https://jupyter.org/install) provide detailed instructions\nfor meeting these requirements. The following steps provide a condensed set of\ninstructions:\n\n1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)\n\n1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)\n\n1. [Install\n virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)\n and create a virtual environment that uses Python 3. Activate the virtual environment.\n\n1. To install Jupyter, run `pip install jupyter` on the\ncommand-line in a terminal shell.\n\n1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.\n\n1. Open this notebook in the Jupyter Notebook Dashboard.","_____no_output_____"],["### Install additional packages\n\nRun the following commands to install the Vertex SDK for Python.","_____no_output_____"]],[["import sys\n\nif \"google.colab\" in sys.modules:\n USER_FLAG = \"\"\nelse:\n USER_FLAG = \"--user\"","_____no_output_____"],["!python3 -m pip install {USER_FLAG} google-cloud-aiplatform --upgrade","_____no_output_____"]],[["### Restart the kernel\n\nAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.","_____no_output_____"]],[["# Automatically restart kernel after installs\nimport os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)","_____no_output_____"]],[["## Before you begin\n\n### Select a GPU runtime\n\n**Make sure you're running this notebook in a GPU runtime if you have that option. 
In Colab, select \"Runtime --> Change runtime type > GPU\"**","_____no_output_____"],["### Set up your Google Cloud project\n\n**The following steps are required, regardless of your notebook environment.**\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\n\n1. [Enable the Vertex AI API and Compute Engine API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component).\n\n1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).\n\n1. Enter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.","_____no_output_____"],["#### Set your project ID\n\n**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.","_____no_output_____"]],[["import os\n\nPROJECT_ID = \"\"\n\n# Get your Google Cloud project ID from gcloud\nif not os.getenv(\"IS_TESTING\"):\n shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID: \", PROJECT_ID)","_____no_output_____"]],[["Otherwise, set your project ID here.","_____no_output_____"]],[["if PROJECT_ID == \"\" or PROJECT_ID is None:\n PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}","_____no_output_____"]],[["Set gcloud config to your project ID.","_____no_output_____"]],[["!gcloud config set project $PROJECT_ID","_____no_output_____"]],[["#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.","_____no_output_____"]],[["from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")","_____no_output_____"]],[["### Authenticate your Google Cloud account\n\n**If you are using Google Cloud Notebooks**, your environment is already\nauthenticated. Skip this step.","_____no_output_____"],["**If you are using Colab**, run the cell below and follow the instructions\nwhen prompted to authenticate your account via oAuth.\n\n**Otherwise**, follow these steps:\n\n1. In the Cloud Console, go to the [**Create service account key**\n page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).\n\n2. Click **Create service account**.\n\n3. In the **Service account name** field, enter a name, and\n click **Create**.\n\n4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type \"Vertex AI\"\ninto the filter box, and select\n **Vertex AI Administrator**. Type \"Storage Object Admin\" into the filter box, and select **Storage Object Admin**.\n\n5. Click *Create*. A JSON file that contains your key downloads to your\nlocal environment.\n\n6. 
Enter the path to your service account key as the\n`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.","_____no_output_____"]],[["import os\nimport sys\n\n# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# If on Google Cloud Notebooks, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''","_____no_output_____"]],[["### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\n\nWhen you submit a training job using the Cloud SDK, you upload a Python package\ncontaining your training code to a Cloud Storage bucket. Vertex AI runs\nthe code from this package. In this tutorial, Vertex AI also saves the\ntrained model that results from your job in the same bucket. Using this model artifact, you can then\ncreate Vertex AI model and endpoint resources in order to serve\nonline predictions.\n\nSet the name of your Cloud Storage bucket below. It must be unique across all\nCloud Storage buckets.\n\nYou may also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are\navailable](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may\nnot use a Multi-Regional Storage bucket for training with Vertex AI.","_____no_output_____"]],[["BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}\nREGION = \"[your-region]\" # @param {type:\"string\"}","_____no_output_____"],["if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"-aip-\" + TIMESTAMP","_____no_output_____"]],[["**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.","_____no_output_____"]],[["! gsutil mb -l $REGION $BUCKET_NAME","_____no_output_____"]],[["Finally, validate access to your Cloud Storage bucket by examining its contents:","_____no_output_____"]],[["! 
gsutil ls -al $BUCKET_NAME","_____no_output_____"]],[["### Import libraries and define constants","_____no_output_____"],["Import required libraries.\n","_____no_output_____"]],[["import pandas as pd\nfrom google.cloud import aiplatform\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom tensorflow.python.keras.utils import data_utils","_____no_output_____"]],[["## Initialize Vertex AI and set an _experiment_\n","_____no_output_____"],["Define experiment name.","_____no_output_____"]],[["EXPERIMENT_NAME = \"\" # @param {type:\"string\"}","_____no_output_____"]],[["If EXEPERIMENT_NAME is not set, set a default one below:","_____no_output_____"]],[["if EXPERIMENT_NAME == \"\" or EXPERIMENT_NAME is None:\n EXPERIMENT_NAME = \"my-experiment-\" + TIMESTAMP","_____no_output_____"]],[["Initialize the *client* for Vertex AI.","_____no_output_____"]],[["aiplatform.init(\n project=PROJECT_ID,\n location=REGION,\n staging_bucket=BUCKET_NAME,\n experiment=EXPERIMENT_NAME,\n)","_____no_output_____"]],[["## Tracking parameters and metrics in Vertex AI custom training jobs","_____no_output_____"],["This example uses the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone","_____no_output_____"]],[["!wget https://storage.googleapis.com/download.tensorflow.org/data/abalone_train.csv\n!gsutil cp abalone_train.csv {BUCKET_NAME}/data/\n\ngcs_csv_path = f\"{BUCKET_NAME}/data/abalone_train.csv\"","_____no_output_____"]],[["### Create a managed tabular dataset from a CSV\n\nA Managed dataset can be used to create an AutoML model or a custom model. ","_____no_output_____"]],[["ds = aiplatform.TabularDataset.create(display_name=\"abalone\", gcs_source=[gcs_csv_path])\n\nds.resource_name","_____no_output_____"]],[["### Write the training script\n\nRun the following cell to create the training script that is used in the sample custom training job.","_____no_output_____"]],[["%%writefile training_script.py\n\nimport pandas as pd\nimport argparse\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', dest='epochs',\n default=10, type=int,\n help='Number of epochs.')\nparser.add_argument('--num_units', dest='num_units',\n default=64, type=int,\n help='Number of unit for first layer.')\nargs = parser.parse_args()\n# uncomment and bump up replica_count for distributed training\n# strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n# tf.distribute.experimental_set_strategy(strategy)\n\ncol_names = [\"Length\", \"Diameter\", \"Height\", \"Whole weight\", \"Shucked weight\", \"Viscera weight\", \"Shell weight\", \"Age\"]\ntarget = \"Age\"\n\ndef aip_data_to_dataframe(wild_card_path):\n return pd.concat([pd.read_csv(fp.numpy().decode(), names=col_names)\n for fp in tf.data.Dataset.list_files([wild_card_path])])\n\ndef get_features_and_labels(df):\n return df.drop(target, axis=1).values, df[target].values\n\ndef data_prep(wild_card_path):\n return get_features_and_labels(aip_data_to_dataframe(wild_card_path))\n\n\nmodel = tf.keras.Sequential([layers.Dense(args.num_units), layers.Dense(1)])\nmodel.compile(loss='mse', optimizer='adam')\n\nmodel.fit(*data_prep(os.environ[\"AIP_TRAINING_DATA_URI\"]),\n epochs=args.epochs ,\n validation_data=data_prep(os.environ[\"AIP_VALIDATION_DATA_URI\"]))\nprint(model.evaluate(*data_prep(os.environ[\"AIP_TEST_DATA_URI\"])))\n\n# save as Vertex AI Managed 
model\ntf.saved_model.save(model, os.environ[\"AIP_MODEL_DIR\"])","_____no_output_____"]],[["### Launch a custom training job and track its trainig parameters on Vertex AI ML Metadata","_____no_output_____"]],[["job = aiplatform.CustomTrainingJob(\n display_name=\"train-abalone-dist-1-replica\",\n script_path=\"training_script.py\",\n container_uri=\"gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest\",\n requirements=[\"gcsfs==0.7.1\"],\n model_serving_container_image_uri=\"gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest\",\n)","_____no_output_____"]],[["Start a new experiment run to track training parameters and start the training job. Note that this operation will take around 10 mins.","_____no_output_____"]],[["aiplatform.start_run(\"custom-training-run-1\") # Change this to your desired run name\nparameters = {\"epochs\": 10, \"num_units\": 64}\naiplatform.log_params(parameters)\n\nmodel = job.run(\n ds,\n replica_count=1,\n model_display_name=\"abalone-model\",\n args=[f\"--epochs={parameters['epochs']}\", f\"--num_units={parameters['num_units']}\"],\n)","_____no_output_____"]],[["### Deploy Model and calculate prediction metrics","_____no_output_____"],["Deploy model to Google Cloud. This operation will take 10-20 mins.","_____no_output_____"]],[["endpoint = model.deploy(machine_type=\"n1-standard-4\")","_____no_output_____"]],[["Once model is deployed, perform online prediction using the `abalone_test` dataset and calculate prediction metrics.","_____no_output_____"],["Prepare the prediction dataset.","_____no_output_____"]],[["def read_data(uri):\n dataset_path = data_utils.get_file(\"auto-mpg.data\", uri)\n col_names = [\n \"Length\",\n \"Diameter\",\n \"Height\",\n \"Whole weight\",\n \"Shucked weight\",\n \"Viscera weight\",\n \"Shell weight\",\n \"Age\",\n ]\n dataset = pd.read_csv(\n dataset_path,\n names=col_names,\n na_values=\"?\",\n comment=\"\\t\",\n sep=\",\",\n skipinitialspace=True,\n )\n return dataset\n\n\ndef get_features_and_labels(df):\n target = \"Age\"\n return df.drop(target, axis=1).values, df[target].values\n\n\ntest_dataset, test_labels = get_features_and_labels(\n read_data(\n \"https://storage.googleapis.com/download.tensorflow.org/data/abalone_test.csv\"\n )\n)","_____no_output_____"]],[["Perform online prediction.","_____no_output_____"]],[["prediction = endpoint.predict(test_dataset.tolist())\nprediction","_____no_output_____"]],[["Calculate and track prediction evaluation metrics.","_____no_output_____"]],[["mse = mean_squared_error(test_labels, prediction.predictions)\nmae = mean_absolute_error(test_labels, prediction.predictions)\n\naiplatform.log_metrics({\"mse\": mse, \"mae\": mae})","_____no_output_____"]],[["### Extract all parameters and metrics created during this experiment.","_____no_output_____"]],[["aiplatform.get_experiment_df()","_____no_output_____"]],[["### View data in the Cloud Console","_____no_output_____"],["Parameters and metrics can also be viewed in the Cloud Console. 
\n","_____no_output_____"]],[["print(\"Vertex AI Experiments:\")\nprint(\n f\"https://console.cloud.google.com/ai/platform/experiments/experiments?folder=&organizationId=&project={PROJECT_ID}\"\n)","_____no_output_____"]],[["## Cleaning up\n\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\nTraining Job\nModel\nCloud Storage Bucket\n\n* Training Job\n* Model\n* Endpoint\n* Cloud Storage Bucket\n","_____no_output_____"]],[["delete_training_job = True\ndelete_model = True\ndelete_endpoint = True\n\n# Warning: Setting this to true will delete everything in your bucket\ndelete_bucket = False\n\n# Delete the training job\njob.delete()\n\n# Delete the model\nmodel.delete()\n\n# Delete the endpoint\nendpoint.delete()\n\nif delete_bucket and \"BUCKET_NAME\" in globals():\n ! gsutil -m rm -r $BUCKET_NAME","_____no_output_____"]]],"string":"[\n [\n [\n \"# Copyright 2021 Google LLC\\n#\\n# Licensed under the Apache License, Version 2.0 (the \\\"License\\\");\\n# you may not use this file except in compliance with the License.\\n# You may obtain a copy of the License at\\n#\\n# https://www.apache.org/licenses/LICENSE-2.0\\n#\\n# Unless required by applicable law or agreed to in writing, software\\n# distributed under the License is distributed on an \\\"AS IS\\\" BASIS,\\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\\n# See the License for the specific language governing permissions and\\n# limitations under the License.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n\\n \\n \\n
\\n \\n \\\"Colab Run in Colab\\n \\n \\n \\n \\\"GitHub\\n View on GitHub\\n \\n
\",\n \"_____no_output_____\"\n ],\n [\n \"#Vertex AI: Track parameters and metrics for custom training jobs\",\n \"_____no_output_____\"\n ],\n [\n \"## Overview\\n\\nThis notebook demonstrates how to track metrics and parameters for Vertex AI custom training jobs, and how to perform detailed analysis using this data.\\n\\n### Dataset\\n\\nThis example uses the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone\\n### Objective\\n\\nIn this notebook, you will learn how to use Vertex SDK for Python to:\\n\\n * Track training parameters and prediction metrics for a custom training job.\\n * Extract and perform analysis for all parameters and metrics within an Experiment.\\n\\n### Costs \\n\\n\\nThis tutorial uses billable components of Google Cloud:\\n\\n* Vertex AI\\n* Cloud Storage\\n\\nLearn about [Vertex AI\\npricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage\\npricing](https://cloud.google.com/storage/pricing), and use the [Pricing\\nCalculator](https://cloud.google.com/products/calculator/)\\nto generate a cost estimate based on your projected usage.\",\n \"_____no_output_____\"\n ],\n [\n \"### Set up your local development environment\\n\\n**If you are using Colab or Google Cloud Notebooks**, your environment already meets\\nall the requirements to run this notebook. You can skip this step.\",\n \"_____no_output_____\"\n ],\n [\n \"**Otherwise**, make sure your environment meets this notebook's requirements.\\nYou need the following:\\n\\n* The Google Cloud SDK\\n* Git\\n* Python 3\\n* virtualenv\\n* Jupyter notebook running in a virtual environment with Python 3\\n\\nThe Google Cloud guide to [Setting up a Python development\\nenvironment](https://cloud.google.com/python/setup) and the [Jupyter\\ninstallation guide](https://jupyter.org/install) provide detailed instructions\\nfor meeting these requirements. The following steps provide a condensed set of\\ninstructions:\\n\\n1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)\\n\\n1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)\\n\\n1. [Install\\n virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)\\n and create a virtual environment that uses Python 3. Activate the virtual environment.\\n\\n1. To install Jupyter, run `pip install jupyter` on the\\ncommand-line in a terminal shell.\\n\\n1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.\\n\\n1. 
Open this notebook in the Jupyter Notebook Dashboard.\",\n \"_____no_output_____\"\n ],\n [\n \"### Install additional packages\\n\\nRun the following commands to install the Vertex SDK for Python.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import sys\\n\\nif \\\"google.colab\\\" in sys.modules:\\n USER_FLAG = \\\"\\\"\\nelse:\\n USER_FLAG = \\\"--user\\\"\",\n \"_____no_output_____\"\n ],\n [\n \"!python3 -m pip install {USER_FLAG} google-cloud-aiplatform --upgrade\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Restart the kernel\\n\\nAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Automatically restart kernel after installs\\nimport os\\n\\nif not os.getenv(\\\"IS_TESTING\\\"):\\n # Automatically restart kernel after installs\\n import IPython\\n\\n app = IPython.Application.instance()\\n app.kernel.do_shutdown(True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Before you begin\\n\\n### Select a GPU runtime\\n\\n**Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select \\\"Runtime --> Change runtime type > GPU\\\"**\",\n \"_____no_output_____\"\n ],\n [\n \"### Set up your Google Cloud project\\n\\n**The following steps are required, regardless of your notebook environment.**\\n\\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\\n\\n1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).\\n\\n1. [Enable the Vertex AI API and Compute Engine API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component).\\n\\n1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).\\n\\n1. Enter your project ID in the cell below. Then run the cell to make sure the\\nCloud SDK uses the right project for all the commands in this notebook.\\n\\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.\",\n \"_____no_output_____\"\n ],\n [\n \"#### Set your project ID\\n\\n**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\\n\\nPROJECT_ID = \\\"\\\"\\n\\n# Get your Google Cloud project ID from gcloud\\nif not os.getenv(\\\"IS_TESTING\\\"):\\n shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null\\n PROJECT_ID = shell_output[0]\\n print(\\\"Project ID: \\\", PROJECT_ID)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Otherwise, set your project ID here.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"if PROJECT_ID == \\\"\\\" or PROJECT_ID is None:\\n PROJECT_ID = \\\"[your-project-id]\\\" # @param {type:\\\"string\\\"}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Set gcloud config to your project ID.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!gcloud config set project $PROJECT_ID\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#### Timestamp\\n\\nIf you are in a live tutorial session, you might be using a shared test account or project. 
To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from datetime import datetime\\n\\nTIMESTAMP = datetime.now().strftime(\\\"%Y%m%d%H%M%S\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Authenticate your Google Cloud account\\n\\n**If you are using Google Cloud Notebooks**, your environment is already\\nauthenticated. Skip this step.\",\n \"_____no_output_____\"\n ],\n [\n \"**If you are using Colab**, run the cell below and follow the instructions\\nwhen prompted to authenticate your account via oAuth.\\n\\n**Otherwise**, follow these steps:\\n\\n1. In the Cloud Console, go to the [**Create service account key**\\n page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).\\n\\n2. Click **Create service account**.\\n\\n3. In the **Service account name** field, enter a name, and\\n click **Create**.\\n\\n4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type \\\"Vertex AI\\\"\\ninto the filter box, and select\\n **Vertex AI Administrator**. Type \\\"Storage Object Admin\\\" into the filter box, and select **Storage Object Admin**.\\n\\n5. Click *Create*. A JSON file that contains your key downloads to your\\nlocal environment.\\n\\n6. Enter the path to your service account key as the\\n`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\\nimport sys\\n\\n# If you are running this notebook in Colab, run this cell and follow the\\n# instructions to authenticate your GCP account. This provides access to your\\n# Cloud Storage bucket and lets you submit training jobs and prediction\\n# requests.\\n\\n# If on Google Cloud Notebooks, then don't execute this code\\nif not os.path.exists(\\\"/opt/deeplearning/metadata/env_version\\\"):\\n if \\\"google.colab\\\" in sys.modules:\\n from google.colab import auth as google_auth\\n\\n google_auth.authenticate_user()\\n\\n # If you are running this notebook locally, replace the string below with the\\n # path to your service account key and run this cell to authenticate your GCP\\n # account.\\n elif not os.getenv(\\\"IS_TESTING\\\"):\\n %env GOOGLE_APPLICATION_CREDENTIALS ''\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Create a Cloud Storage bucket\\n\\n**The following steps are required, regardless of your notebook environment.**\\n\\n\\nWhen you submit a training job using the Cloud SDK, you upload a Python package\\ncontaining your training code to a Cloud Storage bucket. Vertex AI runs\\nthe code from this package. In this tutorial, Vertex AI also saves the\\ntrained model that results from your job in the same bucket. Using this model artifact, you can then\\ncreate Vertex AI model and endpoint resources in order to serve\\nonline predictions.\\n\\nSet the name of your Cloud Storage bucket below. It must be unique across all\\nCloud Storage buckets.\\n\\nYou may also change the `REGION` variable, which is used for operations\\nthroughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are\\navailable](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). 
You may\\nnot use a Multi-Regional Storage bucket for training with Vertex AI.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"BUCKET_NAME = \\\"gs://[your-bucket-name]\\\" # @param {type:\\\"string\\\"}\\nREGION = \\\"[your-region]\\\" # @param {type:\\\"string\\\"}\",\n \"_____no_output_____\"\n ],\n [\n \"if BUCKET_NAME == \\\"\\\" or BUCKET_NAME is None or BUCKET_NAME == \\\"gs://[your-bucket-name]\\\":\\n BUCKET_NAME = \\\"gs://\\\" + PROJECT_ID + \\\"-aip-\\\" + TIMESTAMP\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"! gsutil mb -l $REGION $BUCKET_NAME\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Finally, validate access to your Cloud Storage bucket by examining its contents:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"! gsutil ls -al $BUCKET_NAME\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Import libraries and define constants\",\n \"_____no_output_____\"\n ],\n [\n \"Import required libraries.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\\nfrom google.cloud import aiplatform\\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\\nfrom tensorflow.python.keras.utils import data_utils\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Initialize Vertex AI and set an _experiment_\\n\",\n \"_____no_output_____\"\n ],\n [\n \"Define experiment name.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"EXPERIMENT_NAME = \\\"\\\" # @param {type:\\\"string\\\"}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"If EXEPERIMENT_NAME is not set, set a default one below:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"if EXPERIMENT_NAME == \\\"\\\" or EXPERIMENT_NAME is None:\\n EXPERIMENT_NAME = \\\"my-experiment-\\\" + TIMESTAMP\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Initialize the *client* for Vertex AI.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"aiplatform.init(\\n project=PROJECT_ID,\\n location=REGION,\\n staging_bucket=BUCKET_NAME,\\n experiment=EXPERIMENT_NAME,\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Tracking parameters and metrics in Vertex AI custom training jobs\",\n \"_____no_output_____\"\n ],\n [\n \"This example uses the Abalone Dataset. For more information about this dataset please visit: https://archive.ics.uci.edu/ml/datasets/abalone\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!wget https://storage.googleapis.com/download.tensorflow.org/data/abalone_train.csv\\n!gsutil cp abalone_train.csv {BUCKET_NAME}/data/\\n\\ngcs_csv_path = f\\\"{BUCKET_NAME}/data/abalone_train.csv\\\"\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Create a managed tabular dataset from a CSV\\n\\nA Managed dataset can be used to create an AutoML model or a custom model. 
\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"ds = aiplatform.TabularDataset.create(display_name=\\\"abalone\\\", gcs_source=[gcs_csv_path])\\n\\nds.resource_name\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Write the training script\\n\\nRun the following cell to create the training script that is used in the sample custom training job.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%%writefile training_script.py\\n\\nimport pandas as pd\\nimport argparse\\nimport os\\nimport tensorflow as tf\\nfrom tensorflow import keras\\nfrom tensorflow.keras import layers\\n\\nparser = argparse.ArgumentParser()\\nparser.add_argument('--epochs', dest='epochs',\\n default=10, type=int,\\n help='Number of epochs.')\\nparser.add_argument('--num_units', dest='num_units',\\n default=64, type=int,\\n help='Number of unit for first layer.')\\nargs = parser.parse_args()\\n# uncomment and bump up replica_count for distributed training\\n# strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\\n# tf.distribute.experimental_set_strategy(strategy)\\n\\ncol_names = [\\\"Length\\\", \\\"Diameter\\\", \\\"Height\\\", \\\"Whole weight\\\", \\\"Shucked weight\\\", \\\"Viscera weight\\\", \\\"Shell weight\\\", \\\"Age\\\"]\\ntarget = \\\"Age\\\"\\n\\ndef aip_data_to_dataframe(wild_card_path):\\n return pd.concat([pd.read_csv(fp.numpy().decode(), names=col_names)\\n for fp in tf.data.Dataset.list_files([wild_card_path])])\\n\\ndef get_features_and_labels(df):\\n return df.drop(target, axis=1).values, df[target].values\\n\\ndef data_prep(wild_card_path):\\n return get_features_and_labels(aip_data_to_dataframe(wild_card_path))\\n\\n\\nmodel = tf.keras.Sequential([layers.Dense(args.num_units), layers.Dense(1)])\\nmodel.compile(loss='mse', optimizer='adam')\\n\\nmodel.fit(*data_prep(os.environ[\\\"AIP_TRAINING_DATA_URI\\\"]),\\n epochs=args.epochs ,\\n validation_data=data_prep(os.environ[\\\"AIP_VALIDATION_DATA_URI\\\"]))\\nprint(model.evaluate(*data_prep(os.environ[\\\"AIP_TEST_DATA_URI\\\"])))\\n\\n# save as Vertex AI Managed model\\ntf.saved_model.save(model, os.environ[\\\"AIP_MODEL_DIR\\\"])\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Launch a custom training job and track its trainig parameters on Vertex AI ML Metadata\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"job = aiplatform.CustomTrainingJob(\\n display_name=\\\"train-abalone-dist-1-replica\\\",\\n script_path=\\\"training_script.py\\\",\\n container_uri=\\\"gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest\\\",\\n requirements=[\\\"gcsfs==0.7.1\\\"],\\n model_serving_container_image_uri=\\\"gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest\\\",\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Start a new experiment run to track training parameters and start the training job. Note that this operation will take around 10 mins.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"aiplatform.start_run(\\\"custom-training-run-1\\\") # Change this to your desired run name\\nparameters = {\\\"epochs\\\": 10, \\\"num_units\\\": 64}\\naiplatform.log_params(parameters)\\n\\nmodel = job.run(\\n ds,\\n replica_count=1,\\n model_display_name=\\\"abalone-model\\\",\\n args=[f\\\"--epochs={parameters['epochs']}\\\", f\\\"--num_units={parameters['num_units']}\\\"],\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Deploy Model and calculate prediction metrics\",\n \"_____no_output_____\"\n ],\n [\n \"Deploy model to Google Cloud. 
This operation will take 10-20 mins.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"endpoint = model.deploy(machine_type=\\\"n1-standard-4\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Once model is deployed, perform online prediction using the `abalone_test` dataset and calculate prediction metrics.\",\n \"_____no_output_____\"\n ],\n [\n \"Prepare the prediction dataset.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def read_data(uri):\\n dataset_path = data_utils.get_file(\\\"auto-mpg.data\\\", uri)\\n col_names = [\\n \\\"Length\\\",\\n \\\"Diameter\\\",\\n \\\"Height\\\",\\n \\\"Whole weight\\\",\\n \\\"Shucked weight\\\",\\n \\\"Viscera weight\\\",\\n \\\"Shell weight\\\",\\n \\\"Age\\\",\\n ]\\n dataset = pd.read_csv(\\n dataset_path,\\n names=col_names,\\n na_values=\\\"?\\\",\\n comment=\\\"\\\\t\\\",\\n sep=\\\",\\\",\\n skipinitialspace=True,\\n )\\n return dataset\\n\\n\\ndef get_features_and_labels(df):\\n target = \\\"Age\\\"\\n return df.drop(target, axis=1).values, df[target].values\\n\\n\\ntest_dataset, test_labels = get_features_and_labels(\\n read_data(\\n \\\"https://storage.googleapis.com/download.tensorflow.org/data/abalone_test.csv\\\"\\n )\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Perform online prediction.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"prediction = endpoint.predict(test_dataset.tolist())\\nprediction\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Calculate and track prediction evaluation metrics.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"mse = mean_squared_error(test_labels, prediction.predictions)\\nmae = mean_absolute_error(test_labels, prediction.predictions)\\n\\naiplatform.log_metrics({\\\"mse\\\": mse, \\\"mae\\\": mae})\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Extract all parameters and metrics created during this experiment.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"aiplatform.get_experiment_df()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### View data in the Cloud Console\",\n \"_____no_output_____\"\n ],\n [\n \"Parameters and metrics can also be viewed in the Cloud Console. \\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(\\\"Vertex AI Experiments:\\\")\\nprint(\\n f\\\"https://console.cloud.google.com/ai/platform/experiments/experiments?folder=&organizationId=&project={PROJECT_ID}\\\"\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Cleaning up\\n\\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\\n\\nOtherwise, you can delete the individual resources you created in this tutorial:\\nTraining Job\\nModel\\nCloud Storage Bucket\\n\\n* Training Job\\n* Model\\n* Endpoint\\n* Cloud Storage Bucket\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"delete_training_job = True\\ndelete_model = True\\ndelete_endpoint = True\\n\\n# Warning: Setting this to true will delete everything in your bucket\\ndelete_bucket = False\\n\\n# Delete the training job\\njob.delete()\\n\\n# Delete the model\\nmodel.delete()\\n\\n# Delete the endpoint\\nendpoint.delete()\\n\\nif delete_bucket and \\\"BUCKET_NAME\\\" in globals():\\n ! 
gsutil -m rm -r $BUCKET_NAME\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"],["markdown","markdown","markdown","markdown","markdown","markdown"],["code","code"],["markdown"],["code"],["markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n 
\"code\"\n ]\n]"}}},{"rowIdx":1459066,"cells":{"hexsha":{"kind":"string","value":"e7efee9c83315f4a75d2477814513ab3dee16009"},"size":{"kind":"number","value":2245,"string":"2,245"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"content/lessons/04/End-To-End-Example/ETEE-Password-Program.ipynb"},"max_stars_repo_name":{"kind":"string","value":"MahopacHS/spring-2020-Lamk0810"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4b870cbd094d5813a0dc92afcbf7b0e37968ba53"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"content/lessons/04/End-To-End-Example/ETEE-Password-Program.ipynb"},"max_issues_repo_name":{"kind":"string","value":"MahopacHS/spring-2020-Lamk0810"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4b870cbd094d5813a0dc92afcbf7b0e37968ba53"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"content/lessons/04/End-To-End-Example/ETEE-Password-Program.ipynb"},"max_forks_repo_name":{"kind":"string","value":"MahopacHS/spring-2020-Lamk0810"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4b870cbd094d5813a0dc92afcbf7b0e37968ba53"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":34.0151515152,"string":"34.015152"},"max_line_length":{"kind":"number","value":348,"string":"348"},"alphanum_fraction":{"kind":"number","value":0.528285078,"string":"0.528285"},"cells":{"kind":"list like","value":[[["# End-To-End Example: Password Program\n\nPassword Program:\n\n- 5 attempts for the password\n- On correct password, print: “Access Granted”, then end the program \n- On incorrect password “Invalid Password Attempt #” and give the user another try\n- After 5 attempts, print “You are locked out”. Then end the program.\n","_____no_output_____"]],[["secret = \"rhubarb\"\nattempts = 0\nwhile True:\n password = input(\"Enter Password: \")\n attempts= attempts + 1\n if password == secret:\n print(\"Access Granted!\")\n break \n print(\"Invalid password attempt #\",attempts)\n if attempts == 5:\n print(\"You are locked out\")\n break","Enter Password: sd\nInvalid password attempt # 1\nEnter Password: fds\nInvalid password attempt # 2\nEnter Password: sd\nInvalid password attempt # 3\nEnter Password: d\nInvalid password attempt # 4\nEnter Password: d\nInvalid password attempt # 5\nYou are locked out\n"]]],"string":"[\n [\n [\n \"# End-To-End Example: Password Program\\n\\nPassword Program:\\n\\n- 5 attempts for the password\\n- On correct password, print: “Access Granted”, then end the program \\n- On incorrect password “Invalid Password Attempt #” and give the user another try\\n- After 5 attempts, print “You are locked out”. 
Then end the program.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"secret = \\\"rhubarb\\\"\\nattempts = 0\\nwhile True:\\n password = input(\\\"Enter Password: \\\")\\n attempts= attempts + 1\\n if password == secret:\\n print(\\\"Access Granted!\\\")\\n break \\n print(\\\"Invalid password attempt #\\\",attempts)\\n if attempts == 5:\\n print(\\\"You are locked out\\\")\\n break\",\n \"Enter Password: sd\\nInvalid password attempt # 1\\nEnter Password: fds\\nInvalid password attempt # 2\\nEnter Password: sd\\nInvalid password attempt # 3\\nEnter Password: d\\nInvalid password attempt # 4\\nEnter Password: d\\nInvalid password attempt # 5\\nYou are locked out\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459067,"cells":{"hexsha":{"kind":"string","value":"e7f003ec6e510f789521d605f1b1ef880282e50f"},"size":{"kind":"number","value":540160,"string":"540,160"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"vpc_2018/lab/Lab_VpC_FelixRojoLapalma_003.ipynb"},"max_stars_repo_name":{"kind":"string","value":"felixlapalma/diplodatos_2018"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ec36c646ca08902c676e6c947acfa7d328fcf22d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"vpc_2018/lab/Lab_VpC_FelixRojoLapalma_003.ipynb"},"max_issues_repo_name":{"kind":"string","value":"felixlapalma/diplodatos_2018"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ec36c646ca08902c676e6c947acfa7d328fcf22d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"vpc_2018/lab/Lab_VpC_FelixRojoLapalma_003.ipynb"},"max_forks_repo_name":{"kind":"string","value":"felixlapalma/diplodatos_2018"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ec36c646ca08902c676e6c947acfa7d328fcf22d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":386.1043602573,"string":"386.10436"},"max_line_length":{"kind":"number","value":200376,"string":"200,376"},"alphanum_fraction":{"kind":"number","value":0.9139625296,"string":"0.913963"},"cells":{"kind":"list like","value":[[["# Final Lab\n\n*Felix Rojo Lapalma*\n\n## Main task\n\nIn this notebook, we will apply transfer learning techniques to finetune the [MobileNet](https://arxiv.org/pdf/1704.04861.pdf) CNN on [Cifar-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.\n\n## Procedures\n\nIn general, the main steps that we will follow are:\n\n1. Load data, analyze and split in *training*/*validation*/*testing* sets.\n2. Load CNN and analyze architecture.\n3. Adapt this CNN to our problem.\n4. Setup data augmentation techniques.\n5. 
Add some keras callbacks.\n6. Setup optimization algorithm with their hyperparameters.\n7. Train model!\n8. Choose best model/snapshot.\n9. Evaluate final model on the *testing* set.\n","_____no_output_____"]],[["# load libs\nimport os\nimport matplotlib.pyplot as plt\nfrom IPython.display import SVG\n# https://keras.io/applications/#documentation-for-individual-models\nfrom keras.applications.mobilenet import MobileNet\nfrom keras.datasets import cifar10\nfrom keras.models import Model\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.layers import Dense, GlobalAveragePooling2D,Dropout\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import plot_model, to_categorical\nfrom sklearn.model_selection import train_test_split\nimport cv2\nimport numpy as np\nimport tensorflow as tf","Using TensorFlow backend.\n"]],[["#### cuda","_____no_output_____"]],[["cuda_flag=False\n\nif cuda_flag:\n # Setup one GPU for tensorflow (don't be greedy).\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n # The GPU id to use, \"0\", \"1\", etc.\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" \n # Limit tensorflow gpu usage.\n # Maybe you should comment this lines if you run tensorflow on CPU.\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = 0.3\n sess = tf.Session(config=config)\n","_____no_output_____"]],[["## 1. Load data, analyze and split in *training*/*validation*/*testing* sets","_____no_output_____"]],[["# Cifar-10 class names\n# We will create a dictionary for each type of label\n# This is a mapping from the int class name to \n# their corresponding string class name\nLABELS = {\n 0: \"airplane\",\n 1: \"automobile\",\n 2: \"bird\",\n 3: \"cat\",\n 4: \"deer\",\n 5: \"dog\",\n 6: \"frog\",\n 7: \"horse\",\n 8: \"ship\",\n 9: \"truck\"\n}\n\n# Load dataset from keras\n(x_train_data, y_train_data), (x_test_data, y_test_data) = cifar10.load_data()\n\n############\n# [COMPLETE] \n# Add some prints here to see the loaded data dimensions\n############\n\nprint(\"Cifar-10 x_train shape: {}\".format(x_train_data.shape))\nprint(\"Cifar-10 y_train shape: {}\".format(y_train_data.shape))\nprint(\"Cifar-10 x_test shape: {}\".format(x_test_data.shape))\nprint(\"Cifar-10 y_test shape: {}\".format(y_test_data.shape))","Cifar-10 x_train shape: (50000, 32, 32, 3)\nCifar-10 y_train shape: (50000, 1)\nCifar-10 x_test shape: (10000, 32, 32, 3)\nCifar-10 y_test shape: (10000, 1)\n"],["# from https://www.cs.toronto.edu/~kriz/cifar.html\n# The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. \n# The classes are completely mutually exclusive. There is no overlap between automobiles and trucks. \"Automobile\" includes sedans, SUVs, things of that sort. \"Truck\" includes only big trucks. Neither includes pickup trucks. 
\n# Some constants\nIMG_ROWS = 32\nIMG_COLS = 32\nNUM_CLASSES = 10\nRANDOM_STATE = 2018\n############\n# [COMPLETE] \n# Analyze the amount of images for each class\n# Plot some images to explore how they look\n############\nfrom genlib import get_classes_distribution,plot_label_per_class\nfor y,yt in zip([y_train_data.flatten(),y_test_data.flatten()],['Train','Test']):\n print('{:>15s}'.format(yt))\n get_classes_distribution(y,LABELS)\n plot_label_per_class(y,LABELS)"," Train\nairplane : 5000 or 10.00%\nautomobile : 5000 or 10.00%\nbird : 5000 or 10.00%\ncat : 5000 or 10.00%\ndeer : 5000 or 10.00%\ndog : 5000 or 10.00%\nfrog : 5000 or 10.00%\nhorse : 5000 or 10.00%\nship : 5000 or 10.00%\ntruck : 5000 or 10.00%\n"]],[["Todo parece ir de acuerdo a la documentación. Veamos las imagenes,","_____no_output_____"]],[["from genlib import sample_images_data,plot_sample_images\nfor xy,yt in zip([(x_train_data,y_train_data.flatten()),(x_test_data,y_test_data.flatten())],['Train','Test']):\n print('{:>15s}'.format(yt))\n train_sample_images, train_sample_labels = sample_images_data(*xy,LABELS)\n plot_sample_images(train_sample_images, train_sample_labels,LABELS)"," Train\nTotal number of sample images to plot: 40\n"],["############\n# [COMPLETE] \n# Split training set in train/val sets\n# Use the sampling method that you want\n############\n#init seed\nnp.random.seed(seed=RANDOM_STATE)\n\nfull_set_flag=False # True: uses all images / False only a subset specified by TRAIN Samples and Val Frac\nVAL_FRAC=0.2\nTRAIN_SIZE_BFV=x_train_data.shape[0]\nTRAIN_FRAC=(1-VAL_FRAC)\n# calc\nTRAIN_SAMPLES_FULL=int(TRAIN_FRAC*TRAIN_SIZE_BFV) # if full_set_flag==True\nTRAIN_SAMPLES_RED=20000 # if full_set_flag==False\nVAL_SAMPLES_RED=int(VAL_FRAC*TRAIN_SAMPLES_RED) # if full_set_flag==False\n \nif full_set_flag:\n # Esta forma parece servir si barremos todo el set sino...\n #\n # Get Index\n train_idxs = np.random.choice(np.arange(TRAIN_SIZE_BFV), size=TRAIN_SAMPLES_FULL, replace=False)\n val_idx=np.array([x for x in np.arange(TRAIN_SIZE_BFV) if x not in train_idxs]) \nelse:\n train_idxs = np.random.choice(np.arange(TRAIN_SIZE_BFV), size=TRAIN_SAMPLES_RED, replace=False)\n val_idx=np.random.choice(train_idxs, size=VAL_SAMPLES_RED, replace=False)\n \n# Split\nx_val_data = x_train_data[val_idx, :, :, :]\ny_val_data = y_train_data[val_idx]\nx_train_data = x_train_data[train_idxs, :, :, :]\ny_train_data = y_train_data[train_idxs]\n####","_____no_output_____"],["####\nprint(\"Cifar-10 x_train shape: {}\".format(x_train_data.shape))\nprint(\"Cifar-10 y_train shape: {}\".format(y_train_data.shape))\nprint(\"Cifar-10 x_val shape: {}\".format(x_val_data.shape))\nprint(\"Cifar-10 y_val shape: {}\".format(y_val_data.shape))\nprint(\"Cifar-10 x_test shape: {}\".format(x_test_data.shape))\nprint(\"Cifar-10 y_test shape: {}\".format(y_test_data.shape))","Cifar-10 x_train shape: (20000, 32, 32, 3)\nCifar-10 y_train shape: (20000, 1)\nCifar-10 x_val shape: (4000, 32, 32, 3)\nCifar-10 y_val shape: (4000, 1)\nCifar-10 x_test shape: (10000, 32, 32, 3)\nCifar-10 y_test shape: (10000, 1)\n"]],[["Veamos si quedaron balanceados Train y Validation","_____no_output_____"]],[["for y,yt in zip([y_train_data.flatten(),y_val_data.flatten()],['Train','Validation']):\n print('{:>15s}'.format(yt))\n get_classes_distribution(y,LABELS)\n plot_label_per_class(y,LABELS)"," Train\nairplane : 1950 or 9.75%\nautomobile : 1985 or 9.93%\nbird : 2012 or 10.06%\ncat : 1993 or 9.96%\ndeer : 1943 or 9.71%\ndog : 1994 or 9.97%\nfrog : 2028 or 10.14%\nhorse : 
2030 or 10.15%\nship : 2012 or 10.06%\ntruck : 2053 or 10.27%\n"],["# In order to use the MobileNet CNN pre-trained on imagenet, we have\n# to resize our images to have one of the following static square shape: [(128, 128),\n# (160, 160), (192, 192), or (224, 224)].\n# If we try to resize all the dataset this will not fit on memory, so we have to save all\n# the images to disk, and then when loading those images, our datagenerator will resize them\n# to the desired shape on-the-fly.\n\n############\n# [COMPLETE] \n# Use the above function to save all your data, e.g.:\n# save_to_disk(x_train, y_train, 'train', 'cifar10_images')\n# save_to_disk(x_val, y_val, 'val', 'cifar10_images')\n# save_to_disk(x_test, y_test, 'test', 'cifar10_images')\n############\n\nsave_image_flag=False # To avoid saving images every time!!!\n\nif save_image_flag:\n from genlib import save_to_disk\n save_to_disk(x_train_data, y_train_data, 'train', output_dir='cifar10_images')\n save_to_disk(x_val_data, y_val_data, 'val', output_dir='cifar10_images')\n save_to_disk(x_test_data, y_test_data, 'test', output_dir='cifar10_images')","_____no_output_____"]],[["## 2. Load CNN and analyze architecture","_____no_output_____"]],[["#Model\nNO_EPOCHS = 25\nBATCH_SIZE = 32\nNET_IMG_ROWS = 128\nNET_IMG_COLS = 128","_____no_output_____"],["############\n# [COMPLETE] \n# Use the MobileNet class from Keras to load your base model, pre-trained on imagenet.\n# We wan't to load the pre-trained weights, but without the classification layer.\n# Check the notebook '3_transfer-learning' or https://keras.io/applications/#mobilenet to get more\n# info about how to load this network properly.\n############\n#Note that this model only supports the data format 'channels_last' (height, width, channels).\n#The default input size for this model is 224x224.\n\nbase_model = MobileNet(input_shape=(NET_IMG_ROWS, NET_IMG_COLS, 3), # Input image size\n weights='imagenet', # Use imagenet pre-trained weights\n include_top=False, # Drop classification layer\n pooling='avg') # Global AVG pooling for the \n # output feature vector","_____no_output_____"]],[["## 3. Adapt this CNN to our problem","_____no_output_____"]],[["############\n# [COMPLETE] \n# Having the CNN loaded, now we have to add some layers to adapt this network to our\n# classification problem.\n# We can choose to finetune just the new added layers, some particular layers or all the layer of the\n# model. 
Play with different settings and compare the results.\n############\n\n# get the output feature vector from the base model\nx = base_model.output\n# let's add a fully-connected layer\nx = Dense(1024, activation='relu')(x)\n# Add Drop Out Layer\nx=Dropout(0.5)(x)\n# and a logistic layer\npredictions = Dense(NUM_CLASSES, activation='softmax')(x)\n\n# this is the model we will train\nmodel = Model(inputs=base_model.input, outputs=predictions)","_____no_output_____"],["# Initial Model Summary\nmodel.summary()","_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 128, 128, 3) 0 \n_________________________________________________________________\nconv1_pad (ZeroPadding2D) (None, 129, 129, 3) 0 \n_________________________________________________________________\nconv1 (Conv2D) (None, 64, 64, 32) 864 \n_________________________________________________________________\nconv1_bn (BatchNormalization (None, 64, 64, 32) 128 \n_________________________________________________________________\nconv1_relu (ReLU) (None, 64, 64, 32) 0 \n_________________________________________________________________\nconv_dw_1 (DepthwiseConv2D) (None, 64, 64, 32) 288 \n_________________________________________________________________\nconv_dw_1_bn (BatchNormaliza (None, 64, 64, 32) 128 \n_________________________________________________________________\nconv_dw_1_relu (ReLU) (None, 64, 64, 32) 0 \n_________________________________________________________________\nconv_pw_1 (Conv2D) (None, 64, 64, 64) 2048 \n_________________________________________________________________\nconv_pw_1_bn (BatchNormaliza (None, 64, 64, 64) 256 \n_________________________________________________________________\nconv_pw_1_relu (ReLU) (None, 64, 64, 64) 0 \n_________________________________________________________________\nconv_pad_2 (ZeroPadding2D) (None, 65, 65, 64) 0 \n_________________________________________________________________\nconv_dw_2 (DepthwiseConv2D) (None, 32, 32, 64) 576 \n_________________________________________________________________\nconv_dw_2_bn (BatchNormaliza (None, 32, 32, 64) 256 \n_________________________________________________________________\nconv_dw_2_relu (ReLU) (None, 32, 32, 64) 0 \n_________________________________________________________________\nconv_pw_2 (Conv2D) (None, 32, 32, 128) 8192 \n_________________________________________________________________\nconv_pw_2_bn (BatchNormaliza (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv_pw_2_relu (ReLU) (None, 32, 32, 128) 0 \n_________________________________________________________________\nconv_dw_3 (DepthwiseConv2D) (None, 32, 32, 128) 1152 \n_________________________________________________________________\nconv_dw_3_bn (BatchNormaliza (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv_dw_3_relu (ReLU) (None, 32, 32, 128) 0 \n_________________________________________________________________\nconv_pw_3 (Conv2D) (None, 32, 32, 128) 16384 \n_________________________________________________________________\nconv_pw_3_bn (BatchNormaliza (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv_pw_3_relu (ReLU) (None, 32, 32, 128) 0 \n_________________________________________________________________\nconv_pad_4 (ZeroPadding2D) (None, 33, 33, 128) 0 
\n_________________________________________________________________\nconv_dw_4 (DepthwiseConv2D) (None, 16, 16, 128) 1152 \n_________________________________________________________________\nconv_dw_4_bn (BatchNormaliza (None, 16, 16, 128) 512 \n_________________________________________________________________\nconv_dw_4_relu (ReLU) (None, 16, 16, 128) 0 \n_________________________________________________________________\nconv_pw_4 (Conv2D) (None, 16, 16, 256) 32768 \n_________________________________________________________________\nconv_pw_4_bn (BatchNormaliza (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv_pw_4_relu (ReLU) (None, 16, 16, 256) 0 \n_________________________________________________________________\nconv_dw_5 (DepthwiseConv2D) (None, 16, 16, 256) 2304 \n_________________________________________________________________\nconv_dw_5_bn (BatchNormaliza (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv_dw_5_relu (ReLU) (None, 16, 16, 256) 0 \n_________________________________________________________________\nconv_pw_5 (Conv2D) (None, 16, 16, 256) 65536 \n_________________________________________________________________\nconv_pw_5_bn (BatchNormaliza (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv_pw_5_relu (ReLU) (None, 16, 16, 256) 0 \n_________________________________________________________________\nconv_pad_6 (ZeroPadding2D) (None, 17, 17, 256) 0 \n_________________________________________________________________\nconv_dw_6 (DepthwiseConv2D) (None, 8, 8, 256) 2304 \n_________________________________________________________________\nconv_dw_6_bn (BatchNormaliza (None, 8, 8, 256) 1024 \n_________________________________________________________________\nconv_dw_6_relu (ReLU) (None, 8, 8, 256) 0 \n_________________________________________________________________\nconv_pw_6 (Conv2D) (None, 8, 8, 512) 131072 \n_________________________________________________________________\nconv_pw_6_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_6_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_7 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_7_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_7_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_7 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_7_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_7_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_8 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_8_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_8_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_8 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_8_bn (BatchNormaliza (None, 8, 8, 512) 2048 
\n_________________________________________________________________\nconv_pw_8_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_9 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_9_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_9_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_9 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_9_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_9_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_10 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_10_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_10_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_10 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_10_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_10_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_11 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_11_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_11_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_11 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_11_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_11_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pad_12 (ZeroPadding2D) (None, 9, 9, 512) 0 \n_________________________________________________________________\nconv_dw_12 (DepthwiseConv2D) (None, 4, 4, 512) 4608 \n_________________________________________________________________\nconv_dw_12_bn (BatchNormaliz (None, 4, 4, 512) 2048 \n_________________________________________________________________\nconv_dw_12_relu (ReLU) (None, 4, 4, 512) 0 \n_________________________________________________________________\nconv_pw_12 (Conv2D) (None, 4, 4, 1024) 524288 \n_________________________________________________________________\nconv_pw_12_bn (BatchNormaliz (None, 4, 4, 1024) 4096 \n_________________________________________________________________\nconv_pw_12_relu (ReLU) (None, 4, 4, 1024) 0 \n_________________________________________________________________\nconv_dw_13 (DepthwiseConv2D) (None, 4, 4, 1024) 9216 \n_________________________________________________________________\nconv_dw_13_bn (BatchNormaliz (None, 4, 4, 1024) 4096 \n_________________________________________________________________\nconv_dw_13_relu (ReLU) (None, 4, 4, 1024) 0 \n_________________________________________________________________\nconv_pw_13 (Conv2D) (None, 4, 4, 1024) 1048576 
\n_________________________________________________________________\nconv_pw_13_bn (BatchNormaliz (None, 4, 4, 1024) 4096 \n_________________________________________________________________\nconv_pw_13_relu (ReLU) (None, 4, 4, 1024) 0 \n_________________________________________________________________\nglobal_average_pooling2d_1 ( (None, 1024) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1024) 1049600 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 10250 \n=================================================================\nTotal params: 4,288,714\nTrainable params: 4,266,826\nNon-trainable params: 21,888\n_________________________________________________________________\n"],["model_png=False\nif model_png:\n plot_model(model, to_file='model.png')\n SVG(model_to_dot(model).create(prog='dot', format='svg'))","_____no_output_____"],["# let's visualize layer names and layer indices to see how many layers\n# we should freeze:\nfor i, layer in enumerate(model.layers):\n print(i, layer.name)","0 input_1\n1 conv1_pad\n2 conv1\n3 conv1_bn\n4 conv1_relu\n5 conv_dw_1\n6 conv_dw_1_bn\n7 conv_dw_1_relu\n8 conv_pw_1\n9 conv_pw_1_bn\n10 conv_pw_1_relu\n11 conv_pad_2\n12 conv_dw_2\n13 conv_dw_2_bn\n14 conv_dw_2_relu\n15 conv_pw_2\n16 conv_pw_2_bn\n17 conv_pw_2_relu\n18 conv_dw_3\n19 conv_dw_3_bn\n20 conv_dw_3_relu\n21 conv_pw_3\n22 conv_pw_3_bn\n23 conv_pw_3_relu\n24 conv_pad_4\n25 conv_dw_4\n26 conv_dw_4_bn\n27 conv_dw_4_relu\n28 conv_pw_4\n29 conv_pw_4_bn\n30 conv_pw_4_relu\n31 conv_dw_5\n32 conv_dw_5_bn\n33 conv_dw_5_relu\n34 conv_pw_5\n35 conv_pw_5_bn\n36 conv_pw_5_relu\n37 conv_pad_6\n38 conv_dw_6\n39 conv_dw_6_bn\n40 conv_dw_6_relu\n41 conv_pw_6\n42 conv_pw_6_bn\n43 conv_pw_6_relu\n44 conv_dw_7\n45 conv_dw_7_bn\n46 conv_dw_7_relu\n47 conv_pw_7\n48 conv_pw_7_bn\n49 conv_pw_7_relu\n50 conv_dw_8\n51 conv_dw_8_bn\n52 conv_dw_8_relu\n53 conv_pw_8\n54 conv_pw_8_bn\n55 conv_pw_8_relu\n56 conv_dw_9\n57 conv_dw_9_bn\n58 conv_dw_9_relu\n59 conv_pw_9\n60 conv_pw_9_bn\n61 conv_pw_9_relu\n62 conv_dw_10\n63 conv_dw_10_bn\n64 conv_dw_10_relu\n65 conv_pw_10\n66 conv_pw_10_bn\n67 conv_pw_10_relu\n68 conv_dw_11\n69 conv_dw_11_bn\n70 conv_dw_11_relu\n71 conv_pw_11\n72 conv_pw_11_bn\n73 conv_pw_11_relu\n74 conv_pad_12\n75 conv_dw_12\n76 conv_dw_12_bn\n77 conv_dw_12_relu\n78 conv_pw_12\n79 conv_pw_12_bn\n80 conv_pw_12_relu\n81 conv_dw_13\n82 conv_dw_13_bn\n83 conv_dw_13_relu\n84 conv_pw_13\n85 conv_pw_13_bn\n86 conv_pw_13_relu\n87 global_average_pooling2d_1\n88 dense_1\n89 dropout_1\n90 dense_2\n"],["# En esta instancia no pretendemos entrenar todas sino las ultimas agregadas \nfor layer in model.layers[:88]:\n layer.trainable = False\nfor layer in model.layers[88:]:\n layer.trainable = True","_____no_output_____"],["model.summary()","_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 128, 128, 3) 0 \n_________________________________________________________________\nconv1_pad (ZeroPadding2D) (None, 129, 129, 3) 0 \n_________________________________________________________________\nconv1 (Conv2D) (None, 64, 64, 32) 864 \n_________________________________________________________________\nconv1_bn (BatchNormalization (None, 64, 64, 32) 128 
\n_________________________________________________________________\nconv1_relu (ReLU) (None, 64, 64, 32) 0 \n_________________________________________________________________\nconv_dw_1 (DepthwiseConv2D) (None, 64, 64, 32) 288 \n_________________________________________________________________\nconv_dw_1_bn (BatchNormaliza (None, 64, 64, 32) 128 \n_________________________________________________________________\nconv_dw_1_relu (ReLU) (None, 64, 64, 32) 0 \n_________________________________________________________________\nconv_pw_1 (Conv2D) (None, 64, 64, 64) 2048 \n_________________________________________________________________\nconv_pw_1_bn (BatchNormaliza (None, 64, 64, 64) 256 \n_________________________________________________________________\nconv_pw_1_relu (ReLU) (None, 64, 64, 64) 0 \n_________________________________________________________________\nconv_pad_2 (ZeroPadding2D) (None, 65, 65, 64) 0 \n_________________________________________________________________\nconv_dw_2 (DepthwiseConv2D) (None, 32, 32, 64) 576 \n_________________________________________________________________\nconv_dw_2_bn (BatchNormaliza (None, 32, 32, 64) 256 \n_________________________________________________________________\nconv_dw_2_relu (ReLU) (None, 32, 32, 64) 0 \n_________________________________________________________________\nconv_pw_2 (Conv2D) (None, 32, 32, 128) 8192 \n_________________________________________________________________\nconv_pw_2_bn (BatchNormaliza (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv_pw_2_relu (ReLU) (None, 32, 32, 128) 0 \n_________________________________________________________________\nconv_dw_3 (DepthwiseConv2D) (None, 32, 32, 128) 1152 \n_________________________________________________________________\nconv_dw_3_bn (BatchNormaliza (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv_dw_3_relu (ReLU) (None, 32, 32, 128) 0 \n_________________________________________________________________\nconv_pw_3 (Conv2D) (None, 32, 32, 128) 16384 \n_________________________________________________________________\nconv_pw_3_bn (BatchNormaliza (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv_pw_3_relu (ReLU) (None, 32, 32, 128) 0 \n_________________________________________________________________\nconv_pad_4 (ZeroPadding2D) (None, 33, 33, 128) 0 \n_________________________________________________________________\nconv_dw_4 (DepthwiseConv2D) (None, 16, 16, 128) 1152 \n_________________________________________________________________\nconv_dw_4_bn (BatchNormaliza (None, 16, 16, 128) 512 \n_________________________________________________________________\nconv_dw_4_relu (ReLU) (None, 16, 16, 128) 0 \n_________________________________________________________________\nconv_pw_4 (Conv2D) (None, 16, 16, 256) 32768 \n_________________________________________________________________\nconv_pw_4_bn (BatchNormaliza (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv_pw_4_relu (ReLU) (None, 16, 16, 256) 0 \n_________________________________________________________________\nconv_dw_5 (DepthwiseConv2D) (None, 16, 16, 256) 2304 \n_________________________________________________________________\nconv_dw_5_bn (BatchNormaliza (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv_dw_5_relu (ReLU) (None, 16, 16, 256) 0 
\n_________________________________________________________________\nconv_pw_5 (Conv2D) (None, 16, 16, 256) 65536 \n_________________________________________________________________\nconv_pw_5_bn (BatchNormaliza (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv_pw_5_relu (ReLU) (None, 16, 16, 256) 0 \n_________________________________________________________________\nconv_pad_6 (ZeroPadding2D) (None, 17, 17, 256) 0 \n_________________________________________________________________\nconv_dw_6 (DepthwiseConv2D) (None, 8, 8, 256) 2304 \n_________________________________________________________________\nconv_dw_6_bn (BatchNormaliza (None, 8, 8, 256) 1024 \n_________________________________________________________________\nconv_dw_6_relu (ReLU) (None, 8, 8, 256) 0 \n_________________________________________________________________\nconv_pw_6 (Conv2D) (None, 8, 8, 512) 131072 \n_________________________________________________________________\nconv_pw_6_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_6_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_7 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_7_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_7_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_7 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_7_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_7_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_8 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_8_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_8_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_8 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_8_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_8_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_9 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_9_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_9_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_9 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_9_bn (BatchNormaliza (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_9_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_10 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_10_bn (BatchNormaliz (None, 8, 8, 512) 2048 
\n_________________________________________________________________\nconv_dw_10_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_10 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_10_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_10_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_dw_11 (DepthwiseConv2D) (None, 8, 8, 512) 4608 \n_________________________________________________________________\nconv_dw_11_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_dw_11_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pw_11 (Conv2D) (None, 8, 8, 512) 262144 \n_________________________________________________________________\nconv_pw_11_bn (BatchNormaliz (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv_pw_11_relu (ReLU) (None, 8, 8, 512) 0 \n_________________________________________________________________\nconv_pad_12 (ZeroPadding2D) (None, 9, 9, 512) 0 \n_________________________________________________________________\nconv_dw_12 (DepthwiseConv2D) (None, 4, 4, 512) 4608 \n_________________________________________________________________\nconv_dw_12_bn (BatchNormaliz (None, 4, 4, 512) 2048 \n_________________________________________________________________\nconv_dw_12_relu (ReLU) (None, 4, 4, 512) 0 \n_________________________________________________________________\nconv_pw_12 (Conv2D) (None, 4, 4, 1024) 524288 \n_________________________________________________________________\nconv_pw_12_bn (BatchNormaliz (None, 4, 4, 1024) 4096 \n_________________________________________________________________\nconv_pw_12_relu (ReLU) (None, 4, 4, 1024) 0 \n_________________________________________________________________\nconv_dw_13 (DepthwiseConv2D) (None, 4, 4, 1024) 9216 \n_________________________________________________________________\nconv_dw_13_bn (BatchNormaliz (None, 4, 4, 1024) 4096 \n_________________________________________________________________\nconv_dw_13_relu (ReLU) (None, 4, 4, 1024) 0 \n_________________________________________________________________\nconv_pw_13 (Conv2D) (None, 4, 4, 1024) 1048576 \n_________________________________________________________________\nconv_pw_13_bn (BatchNormaliz (None, 4, 4, 1024) 4096 \n_________________________________________________________________\nconv_pw_13_relu (ReLU) (None, 4, 4, 1024) 0 \n_________________________________________________________________\nglobal_average_pooling2d_1 ( (None, 1024) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1024) 1049600 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 1024) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 10250 \n=================================================================\nTotal params: 4,288,714\nTrainable params: 1,059,850\nNon-trainable params: 3,228,864\n_________________________________________________________________\n"]],[["## 4. 
Setup data augmentation techniques","_____no_output_____"]],[["############\n# [COMPLETE] \n# Use data augmentation to train your model.\n# Use the Keras ImageDataGenerator class for this porpouse.\n# Note: Given that we want to load our images from disk, instead of using \n# ImageDataGenerator.flow method, we have to use ImageDataGenerator.flow_from_directory \n# method in the following way:\n# generator_train = dataget_train.flow_from_directory('resized_images/train', \n# target_size=(128, 128), batch_size=32)\n# generator_val = dataget_train.flow_from_directory('resized_images/val', \n# target_size=(128, 128), batch_size=32)\n# Note that we have to resize our images to finetune the MobileNet CNN, this is done using \n# the target_size argument in flow_from_directory. Remember to set the target_size to one of\n# the valid listed here: [(128, 128), (160, 160), (192, 192), or (224, 224)].\n############\ndata_get=ImageDataGenerator()\ngenerator_train = data_get.flow_from_directory(directory='cifar10_images/train',\n target_size=(128, 128), batch_size=BATCH_SIZE)\ngenerator_val = data_get.flow_from_directory(directory='cifar10_images/val', \n target_size=(128, 128), batch_size=BATCH_SIZE)\n","Found 40000 images belonging to 10 classes.\nFound 10000 images belonging to 10 classes.\n"]],[["## 5. Add some keras callbacks","_____no_output_____"]],[["############\n# [COMPLETE] \n# Load and set some Keras callbacks here!\n############\n\nEXP_ID='experiment_003/'\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\n\nif not os.path.exists(EXP_ID):\n os.makedirs(EXP_ID)\n\ncallbacks = [\n ModelCheckpoint(filepath=os.path.join(EXP_ID, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),\n monitor='val_loss', \n verbose=1, \n save_best_only=False, \n save_weights_only=False, \n mode='auto'),\n TensorBoard(log_dir=os.path.join(EXP_ID, 'logs'), \n write_graph=True, \n write_images=False)\n]\n","_____no_output_____"]],[["## 6. Setup optimization algorithm with their hyperparameters","_____no_output_____"]],[["############\n# [COMPLETE] \n# Choose some optimization algorithm and explore different hyperparameters.\n# Compile your model.\n############\nfrom keras.optimizers import SGD\nfrom keras.losses import categorical_crossentropy\n#model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), \n# loss='categorical_crossentropy',\n# metrics=['accuracy'])\n\n\nmodel.compile(loss=categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy'])","_____no_output_____"]],[["## 7. 
Train model!","_____no_output_____"]],[["generator_train.n","_____no_output_____"],["############\n# [COMPLETE] \n# Use fit_generator to train your model.\n# e.g.:\n# model.fit_generator(\n# generator_train,\n# epochs=50,\n# validation_data=generator_val,\n# steps_per_epoch=generator_train.n // 32,\n# validation_steps=generator_val.n // 32)\n############\nif full_set_flag:\n steps_per_epoch=generator_train.n // BATCH_SIZE\n validation_steps=generator_val.n // BATCH_SIZE\nelse:\n steps_per_epoch=TRAIN_SAMPLES_RED // BATCH_SIZE\n validation_steps=VAL_SAMPLES_RED // BATCH_SIZE \n\n\nmodel.fit_generator(generator_train,\n epochs=NO_EPOCHS,\n validation_data=generator_val,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n callbacks=callbacks)","Epoch 1/25\n625/625 [==============================] - 1911s 3s/step - loss: 0.7648 - acc: 0.7352 - val_loss: 2.4989 - val_acc: 0.2167\n\nEpoch 00001: saving model to experiment_003/weights.01-2.50.hdf5\nEpoch 2/25\n625/625 [==============================] - 1927s 3s/step - loss: 0.7447 - acc: 0.7426 - val_loss: 2.7681 - val_acc: 0.1904\n\nEpoch 00002: saving model to experiment_003/weights.02-2.77.hdf5\nEpoch 3/25\n625/625 [==============================] - 1902s 3s/step - loss: 0.6890 - acc: 0.7630 - val_loss: 2.9040 - val_acc: 0.2019\n\nEpoch 00003: saving model to experiment_003/weights.03-2.90.hdf5\nEpoch 4/25\n625/625 [==============================] - 1933s 3s/step - loss: 0.6982 - acc: 0.7597 - val_loss: 2.9734 - val_acc: 0.1787\n\nEpoch 00004: saving model to experiment_003/weights.04-2.97.hdf5\nEpoch 5/25\n625/625 [==============================] - 1914s 3s/step - loss: 0.6404 - acc: 0.7810 - val_loss: 2.3613 - val_acc: 0.2074\n\nEpoch 00005: saving model to experiment_003/weights.05-2.36.hdf5\nEpoch 6/25\n625/625 [==============================] - 1903s 3s/step - loss: 0.6643 - acc: 0.7724 - val_loss: 2.6470 - val_acc: 0.2183\n\nEpoch 00006: saving model to experiment_003/weights.06-2.65.hdf5\nEpoch 7/25\n625/625 [==============================] - 1924s 3s/step - loss: 0.6096 - acc: 0.7885 - val_loss: 2.4154 - val_acc: 0.2025\n\nEpoch 00007: saving model to experiment_003/weights.07-2.42.hdf5\nEpoch 8/25\n625/625 [==============================] - 1935s 3s/step - loss: 0.6471 - acc: 0.7776 - val_loss: 2.5618 - val_acc: 0.2140\n\nEpoch 00008: saving model to experiment_003/weights.08-2.56.hdf5\nEpoch 9/25\n625/625 [==============================] - 2020s 3s/step - loss: 0.5878 - acc: 0.7964 - val_loss: 3.1497 - val_acc: 0.1823\n\nEpoch 00009: saving model to experiment_003/weights.09-3.15.hdf5\nEpoch 10/25\n625/625 [==============================] - 1981s 3s/step - loss: 0.6049 - acc: 0.7921 - val_loss: 3.1617 - val_acc: 0.1673\n\nEpoch 00010: saving model to experiment_003/weights.10-3.16.hdf5\nEpoch 11/25\n624/625 [============================>.] - ETA: 5s - loss: 0.5667 - acc: 0.8012 "]],[["## 8. Choose best model/snapshot","_____no_output_____"]],[["############\n# [COMPLETE] \n# Analyze and compare your results. Choose the best model and snapshot, \n# justify your election. \n############\n","_____no_output_____"]],[["## 9. 
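Sections 8 and 9 are left as empty placeholders in this run. A minimal sketch of one way to complete them is shown below; it assumes the checkpoints written by the `ModelCheckpoint` callback of section 5 under `EXP_ID`, the test images saved earlier under `cifar10_images/test`, and it reuses `data_get` and `BATCH_SIZE` from the preceding cells. Since the logs above show the lowest validation loss around epoch 5, an early snapshot would be the one selected.

```python
# Sketch for sections 8-9 (not part of the original run): pick the snapshot with the
# lowest validation loss encoded in its file name, then score it on the test split.
import glob
import os
from keras.models import load_model

# File names follow 'weights.{epoch:02d}-{val_loss:.2f}.hdf5', so the val_loss can be
# parsed from the suffix after the last '-'.
checkpoints = glob.glob(os.path.join(EXP_ID, 'weights.*.hdf5'))
best_checkpoint = min(
    checkpoints,
    key=lambda p: float(os.path.basename(p).rsplit('-', 1)[-1].replace('.hdf5', '')))
best_model = load_model(best_checkpoint)
print('selected snapshot:', best_checkpoint)

# Evaluate the chosen snapshot on the held-out test images.
generator_test = data_get.flow_from_directory('cifar10_images/test',
                                              target_size=(128, 128),
                                              batch_size=BATCH_SIZE,
                                              shuffle=False)
test_loss, test_acc = best_model.evaluate_generator(
    generator_test, steps=generator_test.n // BATCH_SIZE)
print('test loss: %.4f  test accuracy: %.4f' % (test_loss, test_acc))
```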
Evaluate final model on the *testing* set\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"############\\n# [COMPLETE] \\n# Evaluate your model on the testing set.\\n############\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459068,"cells":{"hexsha":{"kind":"string","value":"e7f00d1aafb608e6c77ca2a626633e287ac02f5c"},"size":{"kind":"number","value":48909,"string":"48,909"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"tutorials/SingleQubitGates/SingleQubitGates.ipynb"},"max_stars_repo_name":{"kind":"string","value":"JohanC68/QuantumKatas"},"max_stars_repo_head_hexsha":{"kind":"string","value":"11eea1da7e5b493d141a0a35889032a126022f05"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"tutorials/SingleQubitGates/SingleQubitGates.ipynb"},"max_issues_repo_name":{"kind":"string","value":"JohanC68/QuantumKatas"},"max_issues_repo_head_hexsha":{"kind":"string","value":"11eea1da7e5b493d141a0a35889032a126022f05"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"tutorials/SingleQubitGates/SingleQubitGates.ipynb"},"max_forks_repo_name":{"kind":"string","value":"JohanC68/QuantumKatas"},"max_forks_repo_head_hexsha":{"kind":"string","value":"11eea1da7e5b493d141a0a35889032a126022f05"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-12-10T16:54:22.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-12-10T16:54:22.000Z"},"avg_line_length":{"kind":"number","value":53.9239250276,"string":"53.923925"},"max_line_length":{"kind":"number","value":565,"string":"565"},"alphanum_fraction":{"kind":"number","value":0.5748226298,"string":"0.574823"},"cells":{"kind":"list like","value":[[["empty"]]],"string":"[\n [\n [\n \"empty\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["empty"],"string":"[\n \"empty\"\n]"},"cell_type_groups":{"kind":"list like","value":[["empty"]],"string":"[\n [\n \"empty\"\n ]\n]"}}},{"rowIdx":1459069,"cells":{"hexsha":{"kind":"string","value":"e7f00d8fa9c76e298ab633753e14b52d20232e2f"},"size":{"kind":"number","value":295451,"string":"295,451"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Project-2/FinalProjectFeatureSelection.ipynb"},"max_stars_repo_name":{"kind":"string","value":"JasonCZH4/SCNU-CS-2018-DataMining"},"max_stars_repo_head_hexsha":{"kind":"string","value":"aba4cb2045d70808a7fa2af75600d7e66b5c0151"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-07-04T04:34:51.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-12-23T01:42:39.000Z"},"max_issues_repo_path":{"kind":"string","value":"Project-2/FinalProjectFeatureSelection.ipynb"},"max_issues_repo_name":{"kind":"string","value":"charfole/SCNU-CS-2018-DataMining"},"max_issues_repo_head_hexsha":{"kind":"string","value":"aba4cb2045d70808a7fa2af75600d7e66b5c0151"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Project-2/FinalProjectFeatureSelection.ipynb"},"max_forks_repo_name":{"kind":"string","value":"charfole/SCNU-CS-2018-DataMining"},"max_forks_repo_head_hexsha":{"kind":"string","value":"aba4cb2045d70808a7fa2af75600d7e66b5c0151"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-07-04T04:37:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-07-04T04:37:04.000Z"},"avg_line_length":{"kind":"number","value":38.7832764505,"string":"38.783276"},"max_line_length":{"kind":"number","value":168,"string":"168"},"alphanum_fraction":{"kind":"number","value":0.26910046,"string":"0.2691"},"cells":{"kind":"list like","value":[[["# 
导入库","_____no_output_____"]],[["import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.svm import LinearSVR, LinearSVC\r\nfrom sklearn.svm import *\r\nfrom sklearn.linear_model import Lasso, LogisticRegression, LinearRegression\r\nfrom sklearn.tree import DecisionTreeRegressor,DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, GradientBoostingRegressor, GradientBoostingClassifier\r\nfrom sklearn.feature_selection import SelectFromModel\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.decomposition import PCA,LatentDirichletAllocation\r\nfrom sklearn.metrics import *\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.preprocessing import StandardScaler","_____no_output_____"]],[["# 读取数据集","_____no_output_____"]],[["filePath = './data/138rows_after.xlsx'\ndataFrame = pd.read_excel(filePath)\ndataArray = np.array(dataFrame)\ndataFrame","_____no_output_____"]],[["# 获取标签列","_____no_output_____"]],[["name = [column for column in dataFrame]\r\nname = name[5:]\r\npd.DataFrame(name)","_____no_output_____"]],[["# 查看数据规模","_____no_output_____"]],[["X_withLabel = dataArray[:92,5:]\r\nX_all = dataArray[:,5:] \r\ny_data = dataArray[:92,3]\r\ny_label= dataArray[:92,4].astype(int)\r\nprint(\"有标签数据的规模:\",X_withLabel.shape)\r\nprint(\"所有数据的规模:\",X_all.shape)\r\nprint(\"回归标签的规模:\",y_data.shape)\r\nprint(\"分类标签的规模:\",y_label.shape)","有标签数据的规模: (92, 76)\n所有数据的规模: (138, 76)\n回归标签的规模: (92,)\n分类标签的规模: (92,)\n"]],[["# 回归","_____no_output_____"],["## 利用Lasso进行特征选择","_____no_output_____"]],[["lasso = Lasso(alpha = 0.5,max_iter=5000).fit(X_withLabel, y_data)\r\nmodelLasso = SelectFromModel(lasso, prefit=True)\r\nX_Lasso = modelLasso.transform(X_withLabel)\r\n\r\nLassoIndexMask = modelLasso.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\r\nLassoIndexMask = LassoIndexMask.tolist() \r\nLassoIndexTrue = []\r\nLassoIndexFalse = []\r\n\r\nfor i in range(len(LassoIndexMask)): # 记录下被筛选的indicator的序号\r\n if (LassoIndexMask[i]==True):\r\n LassoIndexTrue.append(i)\r\n if (LassoIndexMask[i]==False):\r\n LassoIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(LassoIndexTrue)):\r\n print(i+1,\":\",name[LassoIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(LassoIndexFalse)):\r\n print(i+1,\":\",name[LassoIndexFalse[i]])\r\n\r\ndataFrameOfLassoRegressionFeature = dataFrame\r\nfor i in range(len(LassoIndexFalse)):\r\n dataFrameOfLassoRegressionFeature = dataFrameOfLassoRegressionFeature.drop([name[LassoIndexFalse[i]]],axis=1)\r\ndataFrameOfLassoRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/LassoFeatureSelectionOfData.xlsx')\r\ndataFrameOfLassoRegressionFeature","被筛选后剩下的特征:\n1 : FP1-A1 θ 节律, µV\n2 : FP1-A1 α 节律, µV\n3 : FP2-A2 δ 节律,µV\n4 : FP2-A2 θ 节律, µV\n5 : FP2-A2 α 节律, µV\n6 : FP2-A2 β(LF)节律, µV\n7 : F3-A1 α 节律, µV\n8 : F4-A2 α 节律, µV\n9 : FZ-A2 δ 节律,µV\n10 : C3-A1 α 节律, µV\n11 : C4-A2 θ 节律, µV\n12 : C4-A2 α 节律, µV\n13 : C4-A2 β(LF)节律, µV\n14 : CZ-A1 α 节律, µV\n15 : P3-A1 δ 节律,µV\n16 : P4-A2 α 节律, µV\n17 : P4-A2 β(LF)节律, µV\n18 : PZ-A2 δ 节律,µV\n19 : PZ-A2 α 节律, µV\n20 : PZ-A2 β(LF)节律, µV\n21 : O1-A1 δ 节律,µV\n22 : O1-A1 θ 节律, µV\n23 : O1-A1 α 节律, µV\n24 : O2-A2 δ 节律,µV\n25 : O2-A2 θ 节律, µV\n26 : F7-A1 δ 节律,µV\n27 : F8-A2 δ 节律,µV\n28 : T3-A1 θ 节律, µV\n29 : T3-A1 α 节律, µV\n30 : T3-A1 β(LF)节律, µV\n31 : T4-A2 δ 节律,µV\n32 : T4-A2 α 节律, µV\n33 : T4-A2 β(LF)节律, µV\n34 : T5-A1 δ 节律,µV\n35 : 
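This notebook reduces a 76-column table of EEG band amplitudes with `SelectFromModel`, here wrapped around Lasso and, further down, around linear SVR, decision-tree, random-forest and GBDT estimators. The cells only list which columns each selector keeps; the sketch below shows one hedged way such a subset could be sanity-checked against the full feature set. It reloads the same Excel file and slices the arrays the way the notebook does, while the Ridge scorer and 5-fold cross-validation are illustrative choices that the notebook itself does not run.

```python
# Sketch: compare cross-validated error on all 76 columns vs. the Lasso-selected subset.
# The file path, slicing and Lasso settings mirror the notebook; Ridge and cv=5 are
# illustrative assumptions, not part of the original analysis.
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import cross_val_score

dataArray = np.array(pd.read_excel('./data/138rows_after.xlsx'))
X_withLabel = dataArray[:92, 5:].astype(float)   # labelled rows, EEG band amplitudes
y_data = dataArray[:92, 3].astype(float)         # regression target (column 3)

selector = SelectFromModel(Lasso(alpha=0.5, max_iter=5000)).fit(X_withLabel, y_data)
X_reduced = selector.transform(X_withLabel)

for tag, X in [('all 76 features', X_withLabel), ('Lasso-selected subset', X_reduced)]:
    scores = cross_val_score(Ridge(), X, y_data, cv=5, scoring='neg_mean_absolute_error')
    print('%s: MAE %.3f (+/- %.3f)' % (tag, -scores.mean(), scores.std()))
```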
T5-A1 θ 节律, µV\n36 : T5-A1 α 节律, µV\n37 : T6-A2 θ 节律, µV\n38 : T6-A2 α 节律, µV\n39 : T6-A2 β(LF)节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 β(LF)节律, µV\n3 : F3-A1 δ 节律,µV\n4 : F3-A1 θ 节律, µV\n5 : F3-A1 β(LF)节律, µV\n6 : F4-A2 δ 节律,µV\n7 : F4-A2 θ 节律, µV\n8 : F4-A2 β(LF)节律, µV\n9 : FZ-A2 θ 节律, µV\n10 : FZ-A2 α 节律, µV\n11 : FZ-A2 β(LF)节律, µV\n12 : C3-A1 δ 节律,µV\n13 : C3-A1 θ 节律, µV\n14 : C3-A1 β(LF)节律, µV\n15 : C4-A2 δ 节律,µV\n16 : CZ-A1 δ 节律,µV\n17 : CZ-A1 θ 节律, µV\n18 : CZ-A1 β(LF)节律, µV\n19 : P3-A1 θ 节律, µV\n20 : P3-A1 α 节律, µV\n21 : P3-A1 β(LF)节律, µV\n22 : P4-A2 δ 节律,µV\n23 : P4-A2 θ 节律, µV\n24 : PZ-A2 θ 节律, µV\n25 : O1-A1 β(LF)节律, µV\n26 : O2-A2 α 节律, µV\n27 : O2-A2 β(LF)节律, µV\n28 : F7-A1 θ 节律, µV\n29 : F7-A1 α 节律, µV\n30 : F7-A1 β(LF)节律, µV\n31 : F8-A2 θ 节律, µV\n32 : F8-A2 α 节律, µV\n33 : F8-A2 β(LF)节律, µV\n34 : T3-A1 δ 节律,µV\n35 : T4-A2 θ 节律, µV\n36 : T5-A1 β(LF)节律, µV\n37 : T6-A2 δ 节律,µV\n"]],[["## 利用SVR进行特征选择","_____no_output_____"]],[["lsvr = LinearSVR(C=10,max_iter=10000,loss='squared_epsilon_insensitive',dual=False).fit(X_withLabel, y_data)\r\nmodelLSVR = SelectFromModel(lsvr, prefit=True)\r\nX_LSVR = modelLSVR.transform(X_withLabel)\r\n\r\nSVRIndexMask = modelLSVR.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,SVRIndexMask].tolist() # 被筛选出来的列的值\r\nSVRIndexMask = SVRIndexMask.tolist() \r\nSVRIndexTrue = []\r\nSVRIndexFalse = []\r\n\r\nfor i in range(len(SVRIndexMask)): # 记录下被筛选的indicator的序号\r\n if (SVRIndexMask[i]==True):\r\n SVRIndexTrue.append(i)\r\n if (SVRIndexMask[i]==False):\r\n SVRIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(SVRIndexTrue)):\r\n print(i+1,\":\",name[SVRIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(SVRIndexFalse)):\r\n print(i+1,\":\",name[SVRIndexFalse[i]])\r\n\r\ndataFrameOfLSVRegressionFeature = dataFrame\r\nfor i in range(len(SVRIndexFalse)):\r\n dataFrameOfLSVRegressionFeature = dataFrameOfLSVRegressionFeature.drop([name[SVRIndexFalse[i]]],axis=1)\r\ndataFrameOfLSVRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/LSVRFeatureSelectionOfLabel.xlsx')\r\ndataFrameOfLSVRegressionFeature","被筛选后剩下的特征:\n1 : FP1-A1 θ 节律, µV\n2 : FP1-A1 β(LF)节律, µV\n3 : FP2-A2 δ 节律,µV\n4 : FP2-A2 θ 节律, µV\n5 : FP2-A2 β(LF)节律, µV\n6 : F3-A1 θ 节律, µV\n7 : F4-A2 β(LF)节律, µV\n8 : C3-A1 β(LF)节律, µV\n9 : CZ-A1 θ 节律, µV\n10 : CZ-A1 β(LF)节律, µV\n11 : P3-A1 δ 节律,µV\n12 : P3-A1 θ 节律, µV\n13 : P3-A1 α 节律, µV\n14 : P4-A2 δ 节律,µV\n15 : P4-A2 θ 节律, µV\n16 : P4-A2 α 节律, µV\n17 : P4-A2 β(LF)节律, µV\n18 : O1-A1 θ 节律, µV\n19 : O1-A1 β(LF)节律, µV\n20 : O2-A2 θ 节律, µV\n21 : O2-A2 β(LF)节律, µV\n22 : F7-A1 θ 节律, µV\n23 : F7-A1 β(LF)节律, µV\n24 : F8-A2 δ 节律,µV\n25 : F8-A2 α 节律, µV\n26 : F8-A2 β(LF)节律, µV\n27 : T4-A2 β(LF)节律, µV\n28 : T5-A1 β(LF)节律, µV\n29 : T6-A2 δ 节律,µV\n30 : T6-A2 θ 节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 α 节律, µV\n3 : FP2-A2 α 节律, µV\n4 : F3-A1 δ 节律,µV\n5 : F3-A1 α 节律, µV\n6 : F3-A1 β(LF)节律, µV\n7 : F4-A2 δ 节律,µV\n8 : F4-A2 θ 节律, µV\n9 : F4-A2 α 节律, µV\n10 : FZ-A2 δ 节律,µV\n11 : FZ-A2 θ 节律, µV\n12 : FZ-A2 α 节律, µV\n13 : FZ-A2 β(LF)节律, µV\n14 : C3-A1 δ 节律,µV\n15 : C3-A1 θ 节律, µV\n16 : C3-A1 α 节律, µV\n17 : C4-A2 δ 节律,µV\n18 : C4-A2 θ 节律, µV\n19 : C4-A2 α 节律, µV\n20 : C4-A2 β(LF)节律, µV\n21 : CZ-A1 δ 节律,µV\n22 : CZ-A1 α 节律, µV\n23 : P3-A1 β(LF)节律, µV\n24 : PZ-A2 δ 节律,µV\n25 : PZ-A2 θ 节律, µV\n26 : PZ-A2 α 节律, µV\n27 : PZ-A2 β(LF)节律, µV\n28 : O1-A1 δ 节律,µV\n29 : O1-A1 α 节律, µV\n30 : O2-A2 δ 节律,µV\n31 : O2-A2 α 节律, µV\n32 : F7-A1 δ 节律,µV\n33 : F7-A1 α 节律, µV\n34 : F8-A2 θ 节律, µV\n35 : T3-A1 δ 节律,µV\n36 : T3-A1 θ 
节律, µV\n37 : T3-A1 α 节律, µV\n38 : T3-A1 β(LF)节律, µV\n39 : T4-A2 δ 节律,µV\n40 : T4-A2 θ 节律, µV\n41 : T4-A2 α 节律, µV\n42 : T5-A1 δ 节律,µV\n43 : T5-A1 θ 节律, µV\n44 : T5-A1 α 节律, µV\n45 : T6-A2 α 节律, µV\n46 : T6-A2 β(LF)节律, µV\n"]],[["## 利用树进行特征选择","_____no_output_____"]],[["decisionTree = DecisionTreeRegressor(min_samples_leaf=1,random_state=1).fit(X_withLabel, y_data)\r\nmodelDecisionTree = SelectFromModel(decisionTree, prefit=True)\r\nX_DecisionTree = modelDecisionTree.transform(X_withLabel)\r\n\r\ndecisionTreeIndexMask = modelDecisionTree.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\r\ndecisionTreeIndexMask = decisionTreeIndexMask.tolist() \r\ndecisionTreeIndexTrue = []\r\ndecisionTreeIndexFalse = []\r\n\r\nfor i in range(len(decisionTreeIndexMask)): # 记录下被筛选的indicator的序号\r\n if (decisionTreeIndexMask[i]==True):\r\n decisionTreeIndexTrue.append(i)\r\n if (decisionTreeIndexMask[i]==False):\r\n decisionTreeIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(decisionTreeIndexTrue)):\r\n print(i+1,\":\",name[decisionTreeIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(decisionTreeIndexFalse)):\r\n print(i+1,\":\",name[decisionTreeIndexFalse[i]])\r\n\r\ndataFrameOfDecisionTreeRegressionFeature = dataFrame\r\nfor i in range(len(decisionTreeIndexFalse)):\r\n dataFrameOfDecisionTreeRegressionFeature = dataFrameOfDecisionTreeRegressionFeature.drop([name[decisionTreeIndexFalse[i]]],axis=1)\r\ndataFrameOfDecisionTreeRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/DecisionTreeFeatureSelectionOfData.xlsx')\r\ndataFrameOfDecisionTreeRegressionFeature","被筛选后剩下的特征:\n1 : F4-A2 θ 节律, µV\n2 : F4-A2 α 节律, µV\n3 : FZ-A2 θ 节律, µV\n4 : FZ-A2 β(LF)节律, µV\n5 : C3-A1 θ 节律, µV\n6 : C3-A1 β(LF)节律, µV\n7 : CZ-A1 β(LF)节律, µV\n8 : P3-A1 δ 节律,µV\n9 : P3-A1 β(LF)节律, µV\n10 : PZ-A2 α 节律, µV\n11 : O2-A2 δ 节律,µV\n12 : O2-A2 α 节律, µV\n13 : F8-A2 δ 节律,µV\n14 : T3-A1 θ 节律, µV\n15 : T5-A1 β(LF)节律, µV\n16 : T6-A2 α 节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 θ 节律, µV\n3 : FP1-A1 α 节律, µV\n4 : FP1-A1 β(LF)节律, µV\n5 : FP2-A2 δ 节律,µV\n6 : FP2-A2 θ 节律, µV\n7 : FP2-A2 α 节律, µV\n8 : FP2-A2 β(LF)节律, µV\n9 : F3-A1 δ 节律,µV\n10 : F3-A1 θ 节律, µV\n11 : F3-A1 α 节律, µV\n12 : F3-A1 β(LF)节律, µV\n13 : F4-A2 δ 节律,µV\n14 : F4-A2 β(LF)节律, µV\n15 : FZ-A2 δ 节律,µV\n16 : FZ-A2 α 节律, µV\n17 : C3-A1 δ 节律,µV\n18 : C3-A1 α 节律, µV\n19 : C4-A2 δ 节律,µV\n20 : C4-A2 θ 节律, µV\n21 : C4-A2 α 节律, µV\n22 : C4-A2 β(LF)节律, µV\n23 : CZ-A1 δ 节律,µV\n24 : CZ-A1 θ 节律, µV\n25 : CZ-A1 α 节律, µV\n26 : P3-A1 θ 节律, µV\n27 : P3-A1 α 节律, µV\n28 : P4-A2 δ 节律,µV\n29 : P4-A2 θ 节律, µV\n30 : P4-A2 α 节律, µV\n31 : P4-A2 β(LF)节律, µV\n32 : PZ-A2 δ 节律,µV\n33 : PZ-A2 θ 节律, µV\n34 : PZ-A2 β(LF)节律, µV\n35 : O1-A1 δ 节律,µV\n36 : O1-A1 θ 节律, µV\n37 : O1-A1 α 节律, µV\n38 : O1-A1 β(LF)节律, µV\n39 : O2-A2 θ 节律, µV\n40 : O2-A2 β(LF)节律, µV\n41 : F7-A1 δ 节律,µV\n42 : F7-A1 θ 节律, µV\n43 : F7-A1 α 节律, µV\n44 : F7-A1 β(LF)节律, µV\n45 : F8-A2 θ 节律, µV\n46 : F8-A2 α 节律, µV\n47 : F8-A2 β(LF)节律, µV\n48 : T3-A1 δ 节律,µV\n49 : T3-A1 α 节律, µV\n50 : T3-A1 β(LF)节律, µV\n51 : T4-A2 δ 节律,µV\n52 : T4-A2 θ 节律, µV\n53 : T4-A2 α 节律, µV\n54 : T4-A2 β(LF)节律, µV\n55 : T5-A1 δ 节律,µV\n56 : T5-A1 θ 节律, µV\n57 : T5-A1 α 节律, µV\n58 : T6-A2 δ 节律,µV\n59 : T6-A2 θ 节律, µV\n60 : T6-A2 β(LF)节律, µV\n"]],[["## 利用随机森林进行特征选择","_____no_output_____"]],[["randomForest = RandomForestRegressor().fit(X_withLabel, y_data)\r\nmodelrandomForest = SelectFromModel(randomForest, prefit=True)\r\nX_randomForest = 
modelrandomForest.transform(X_withLabel)\r\n\r\nrandomForestIndexMask = modelrandomForest.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,randomForestIndexMask].tolist() # 被筛选出来的列的值\r\nrandomForestIndexMask = randomForestIndexMask.tolist() \r\nrandomForestIndexTrue = []\r\nrandomForestIndexFalse = []\r\n\r\nfor i in range(len(randomForestIndexMask)): # 记录下被筛选的indicator的序号\r\n if (randomForestIndexMask[i]==True):\r\n randomForestIndexTrue.append(i)\r\n if (randomForestIndexMask[i]==False):\r\n randomForestIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(randomForestIndexTrue)):\r\n print(i+1,\":\",name[randomForestIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(randomForestIndexFalse)):\r\n print(i+1,\":\",name[randomForestIndexFalse[i]])\r\n\r\ndataFrameOfRandomForestRegressionFeature = dataFrame\r\nfor i in range(len(randomForestIndexFalse)):\r\n dataFrameOfRandomForestRegressionFeature = dataFrameOfRandomForestRegressionFeature.drop([name[randomForestIndexFalse[i]]],axis=1)\r\ndataFrameOfRandomForestRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/RandomForestFeatureSelectionOfData.xlsx')\r\ndataFrameOfRandomForestRegressionFeature","被筛选后剩下的特征:\n1 : FP1-A1 θ 节律, µV\n2 : FP1-A1 α 节律, µV\n3 : FP2-A2 θ 节律, µV\n4 : FP2-A2 β(LF)节律, µV\n5 : F3-A1 θ 节律, µV\n6 : F4-A2 θ 节律, µV\n7 : C3-A1 θ 节律, µV\n8 : C4-A2 δ 节律,µV\n9 : C4-A2 θ 节律, µV\n10 : P3-A1 δ 节律,µV\n11 : P4-A2 θ 节律, µV\n12 : PZ-A2 β(LF)节律, µV\n13 : O1-A1 θ 节律, µV\n14 : O2-A2 δ 节律,µV\n15 : O2-A2 θ 节律, µV\n16 : O2-A2 β(LF)节律, µV\n17 : F7-A1 θ 节律, µV\n18 : F8-A2 δ 节律,µV\n19 : F8-A2 θ 节律, µV\n20 : F8-A2 α 节律, µV\n21 : T3-A1 θ 节律, µV\n22 : T3-A1 β(LF)节律, µV\n23 : T4-A2 δ 节律,µV\n24 : T4-A2 θ 节律, µV\n25 : T4-A2 β(LF)节律, µV\n26 : T5-A1 θ 节律, µV\n27 : T5-A1 β(LF)节律, µV\n28 : T6-A2 θ 节律, µV\n29 : T6-A2 β(LF)节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 β(LF)节律, µV\n3 : FP2-A2 δ 节律,µV\n4 : FP2-A2 α 节律, µV\n5 : F3-A1 δ 节律,µV\n6 : F3-A1 α 节律, µV\n7 : F3-A1 β(LF)节律, µV\n8 : F4-A2 δ 节律,µV\n9 : F4-A2 α 节律, µV\n10 : F4-A2 β(LF)节律, µV\n11 : FZ-A2 δ 节律,µV\n12 : FZ-A2 θ 节律, µV\n13 : FZ-A2 α 节律, µV\n14 : FZ-A2 β(LF)节律, µV\n15 : C3-A1 δ 节律,µV\n16 : C3-A1 α 节律, µV\n17 : C3-A1 β(LF)节律, µV\n18 : C4-A2 α 节律, µV\n19 : C4-A2 β(LF)节律, µV\n20 : CZ-A1 δ 节律,µV\n21 : CZ-A1 θ 节律, µV\n22 : CZ-A1 α 节律, µV\n23 : CZ-A1 β(LF)节律, µV\n24 : P3-A1 θ 节律, µV\n25 : P3-A1 α 节律, µV\n26 : P3-A1 β(LF)节律, µV\n27 : P4-A2 δ 节律,µV\n28 : P4-A2 α 节律, µV\n29 : P4-A2 β(LF)节律, µV\n30 : PZ-A2 δ 节律,µV\n31 : PZ-A2 θ 节律, µV\n32 : PZ-A2 α 节律, µV\n33 : O1-A1 δ 节律,µV\n34 : O1-A1 α 节律, µV\n35 : O1-A1 β(LF)节律, µV\n36 : O2-A2 α 节律, µV\n37 : F7-A1 δ 节律,µV\n38 : F7-A1 α 节律, µV\n39 : F7-A1 β(LF)节律, µV\n40 : F8-A2 β(LF)节律, µV\n41 : T3-A1 δ 节律,µV\n42 : T3-A1 α 节律, µV\n43 : T4-A2 α 节律, µV\n44 : T5-A1 δ 节律,µV\n45 : T5-A1 α 节律, µV\n46 : T6-A2 δ 节律,µV\n47 : T6-A2 α 节律, µV\n"]],[["## 利用GBDT进行特征选择","_____no_output_____"]],[["GBDTRegressor = GradientBoostingRegressor().fit(X_withLabel, y_data)\r\nmodelGBDTRegressor = SelectFromModel(GBDTRegressor, prefit=True)\r\nX_GBDTRegressor = modelGBDTRegressor.transform(X_withLabel)\r\n\r\nGBDTRegressorIndexMask = modelGBDTRegressor.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,GBDTRegressorIndexMask].tolist() # 被筛选出来的列的值\r\nGBDTRegressorIndexMask = GBDTRegressorIndexMask.tolist() \r\nGBDTRegressorIndexTrue = []\r\nGBDTRegressorIndexFalse = []\r\n\r\nfor i in range(len(GBDTRegressorIndexMask)): # 记录下被筛选的indicator的序号\r\n if (GBDTRegressorIndexMask[i]==True):\r\n GBDTRegressorIndexTrue.append(i)\r\n if 
(GBDTRegressorIndexMask[i]==False):\r\n GBDTRegressorIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(GBDTRegressorIndexTrue)):\r\n print(i+1,\":\",name[GBDTRegressorIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(GBDTRegressorIndexFalse)):\r\n print(i+1,\":\",name[GBDTRegressorIndexFalse[i]])\r\n\r\ndataFrameOfGBDTRegressionFeature = dataFrame\r\nfor i in range(len(GBDTRegressorIndexFalse)):\r\n dataFrameOfGBDTRegressionFeature = dataFrameOfGBDTRegressionFeature.drop([name[GBDTRegressorIndexFalse[i]]],axis=1)\r\ndataFrameOfGBDTRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/GBDTRegressorFeatureSelectionOfData.xlsx')\r\ndataFrameOfGBDTRegressionFeature","被筛选后剩下的特征:\n1 : FP2-A2 θ 节律, µV\n2 : FP2-A2 β(LF)节律, µV\n3 : F3-A1 θ 节律, µV\n4 : C3-A1 δ 节律,µV\n5 : C3-A1 θ 节律, µV\n6 : C4-A2 δ 节律,µV\n7 : C4-A2 θ 节律, µV\n8 : CZ-A1 θ 节律, µV\n9 : P3-A1 δ 节律,µV\n10 : P3-A1 α 节律, µV\n11 : P4-A2 θ 节律, µV\n12 : P4-A2 α 节律, µV\n13 : PZ-A2 α 节律, µV\n14 : PZ-A2 β(LF)节律, µV\n15 : O1-A1 θ 节律, µV\n16 : O2-A2 δ 节律,µV\n17 : O2-A2 θ 节律, µV\n18 : O2-A2 β(LF)节律, µV\n19 : F8-A2 δ 节律,µV\n20 : F8-A2 α 节律, µV\n21 : F8-A2 β(LF)节律, µV\n22 : T3-A1 θ 节律, µV\n23 : T4-A2 δ 节律,µV\n24 : T4-A2 θ 节律, µV\n25 : T4-A2 β(LF)节律, µV\n26 : T6-A2 θ 节律, µV\n27 : T6-A2 β(LF)节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 θ 节律, µV\n3 : FP1-A1 α 节律, µV\n4 : FP1-A1 β(LF)节律, µV\n5 : FP2-A2 δ 节律,µV\n6 : FP2-A2 α 节律, µV\n7 : F3-A1 δ 节律,µV\n8 : F3-A1 α 节律, µV\n9 : F3-A1 β(LF)节律, µV\n10 : F4-A2 δ 节律,µV\n11 : F4-A2 θ 节律, µV\n12 : F4-A2 α 节律, µV\n13 : F4-A2 β(LF)节律, µV\n14 : FZ-A2 δ 节律,µV\n15 : FZ-A2 θ 节律, µV\n16 : FZ-A2 α 节律, µV\n17 : FZ-A2 β(LF)节律, µV\n18 : C3-A1 α 节律, µV\n19 : C3-A1 β(LF)节律, µV\n20 : C4-A2 α 节律, µV\n21 : C4-A2 β(LF)节律, µV\n22 : CZ-A1 δ 节律,µV\n23 : CZ-A1 α 节律, µV\n24 : CZ-A1 β(LF)节律, µV\n25 : P3-A1 θ 节律, µV\n26 : P3-A1 β(LF)节律, µV\n27 : P4-A2 δ 节律,µV\n28 : P4-A2 β(LF)节律, µV\n29 : PZ-A2 δ 节律,µV\n30 : PZ-A2 θ 节律, µV\n31 : O1-A1 δ 节律,µV\n32 : O1-A1 α 节律, µV\n33 : O1-A1 β(LF)节律, µV\n34 : O2-A2 α 节律, µV\n35 : F7-A1 δ 节律,µV\n36 : F7-A1 θ 节律, µV\n37 : F7-A1 α 节律, µV\n38 : F7-A1 β(LF)节律, µV\n39 : F8-A2 θ 节律, µV\n40 : T3-A1 δ 节律,µV\n41 : T3-A1 α 节律, µV\n42 : T3-A1 β(LF)节律, µV\n43 : T4-A2 α 节律, µV\n44 : T5-A1 δ 节律,µV\n45 : T5-A1 θ 节律, µV\n46 : T5-A1 α 节律, µV\n47 : T5-A1 β(LF)节律, µV\n48 : T6-A2 δ 节律,µV\n49 : T6-A2 α 节律, µV\n"]],[["# 分类","_____no_output_____"],["## 利用Lasso进行特征选择","_____no_output_____"]],[["lasso = Lasso(alpha = 0.3,max_iter=5000).fit(X_withLabel, y_label)\r\nmodelLasso = SelectFromModel(lasso, prefit=True)\r\nX_Lasso = modelLasso.transform(X_withLabel)\r\n\r\nLassoIndexMask = modelLasso.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\r\nLassoIndexMask = LassoIndexMask.tolist() \r\nLassoIndexTrue = []\r\nLassoIndexFalse = []\r\n\r\nfor i in range(len(LassoIndexMask)): # 记录下被筛选的indicator的序号\r\n if (LassoIndexMask[i]==True):\r\n LassoIndexTrue.append(i)\r\n if (LassoIndexMask[i]==False):\r\n LassoIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(LassoIndexTrue)):\r\n print(i+1,\":\",name[LassoIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(LassoIndexFalse)):\r\n print(i+1,\":\",name[LassoIndexFalse[i]])\r\n\r\ndataFrameOfLassoClassificationFeature = dataFrame\r\nfor i in range(len(LassoIndexFalse)):\r\n dataFrameOfLassoClassificationFeature = 
dataFrameOfLassoClassificationFeature.drop([name[LassoIndexFalse[i]]],axis=1)\r\ndataFrameOfLassoClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/LassoFeatureSelectionOfLabel.xlsx')\r\ndataFrameOfLassoClassificationFeature","被筛选后剩下的特征:\n1 : FP1-A1 α 节律, µV\n2 : FZ-A2 δ 节律,µV\n3 : C4-A2 δ 节律,µV\n4 : CZ-A1 α 节律, µV\n5 : P3-A1 δ 节律,µV\n6 : P4-A2 α 节律, µV\n7 : PZ-A2 δ 节律,µV\n8 : O2-A2 δ 节律,µV\n9 : F7-A1 δ 节律,µV\n10 : F7-A1 α 节律, µV\n11 : T3-A1 α 节律, µV\n12 : T4-A2 δ 节律,µV\n13 : T4-A2 α 节律, µV\n14 : T5-A1 δ 节律,µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 θ 节律, µV\n3 : FP1-A1 β(LF)节律, µV\n4 : FP2-A2 δ 节律,µV\n5 : FP2-A2 θ 节律, µV\n6 : FP2-A2 α 节律, µV\n7 : FP2-A2 β(LF)节律, µV\n8 : F3-A1 δ 节律,µV\n9 : F3-A1 θ 节律, µV\n10 : F3-A1 α 节律, µV\n11 : F3-A1 β(LF)节律, µV\n12 : F4-A2 δ 节律,µV\n13 : F4-A2 θ 节律, µV\n14 : F4-A2 α 节律, µV\n15 : F4-A2 β(LF)节律, µV\n16 : FZ-A2 θ 节律, µV\n17 : FZ-A2 α 节律, µV\n18 : FZ-A2 β(LF)节律, µV\n19 : C3-A1 δ 节律,µV\n20 : C3-A1 θ 节律, µV\n21 : C3-A1 α 节律, µV\n22 : C3-A1 β(LF)节律, µV\n23 : C4-A2 θ 节律, µV\n24 : C4-A2 α 节律, µV\n25 : C4-A2 β(LF)节律, µV\n26 : CZ-A1 δ 节律,µV\n27 : CZ-A1 θ 节律, µV\n28 : CZ-A1 β(LF)节律, µV\n29 : P3-A1 θ 节律, µV\n30 : P3-A1 α 节律, µV\n31 : P3-A1 β(LF)节律, µV\n32 : P4-A2 δ 节律,µV\n33 : P4-A2 θ 节律, µV\n34 : P4-A2 β(LF)节律, µV\n35 : PZ-A2 θ 节律, µV\n36 : PZ-A2 α 节律, µV\n37 : PZ-A2 β(LF)节律, µV\n38 : O1-A1 δ 节律,µV\n39 : O1-A1 θ 节律, µV\n40 : O1-A1 α 节律, µV\n41 : O1-A1 β(LF)节律, µV\n42 : O2-A2 θ 节律, µV\n43 : O2-A2 α 节律, µV\n44 : O2-A2 β(LF)节律, µV\n45 : F7-A1 θ 节律, µV\n46 : F7-A1 β(LF)节律, µV\n47 : F8-A2 δ 节律,µV\n48 : F8-A2 θ 节律, µV\n49 : F8-A2 α 节律, µV\n50 : F8-A2 β(LF)节律, µV\n51 : T3-A1 δ 节律,µV\n52 : T3-A1 θ 节律, µV\n53 : T3-A1 β(LF)节律, µV\n54 : T4-A2 θ 节律, µV\n55 : T4-A2 β(LF)节律, µV\n56 : T5-A1 θ 节律, µV\n57 : T5-A1 α 节律, µV\n58 : T5-A1 β(LF)节律, µV\n59 : T6-A2 δ 节律,µV\n60 : T6-A2 θ 节律, µV\n61 : T6-A2 α 节律, µV\n62 : T6-A2 β(LF)节律, µV\n"]],[["## 利用SVC进行特征选择","_____no_output_____"]],[["lsvc = LinearSVC(C=10,max_iter=10000,dual=False).fit(X_withLabel, y_label.ravel())\r\nmodelLSVC = SelectFromModel(lsvc, prefit=True)\r\nX_LSVR = modelLSVR.transform(X_withLabel)\r\n\r\nSVCIndexMask = modelLSVC.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,SVCIndexMask].tolist() # 被筛选出来的列的值\r\nSVCIndexMask = SVCIndexMask.tolist() \r\nSVCIndexTrue = []\r\nSVCIndexFalse = []\r\n\r\nfor i in range(len(SVCIndexMask)): # 记录下被筛选的indicator的序号\r\n if (SVCIndexMask[i]==True):\r\n SVCIndexTrue.append(i)\r\n if (SVCIndexMask[i]==False):\r\n SVCIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(SVCIndexTrue)):\r\n print(i+1,\":\",name[SVCIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(SVCIndexFalse)):\r\n print(i+1,\":\",name[SVCIndexFalse[i]])\r\n\r\ndataFrameOfLSVClassificationFeature = dataFrame\r\nfor i in range(len(SVCIndexFalse)):\r\n dataFrameOfLSVClassificationFeature = dataFrameOfLSVClassificationFeature.drop([name[SVCIndexFalse[i]]],axis=1)\r\ndataFrameOfLSVClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/LSVCFeatureSelectionOfLabel.xlsx')\r\ndataFrameOfLSVClassificationFeature","被筛选后剩下的特征:\n1 : FP1-A1 θ 节律, µV\n2 : FP2-A2 θ 节律, µV\n3 : FP2-A2 α 节律, µV\n4 : FP2-A2 β(LF)节律, µV\n5 : FZ-A2 β(LF)节律, µV\n6 : C3-A1 θ 节律, µV\n7 : C3-A1 β(LF)节律, µV\n8 : C4-A2 δ 节律,µV\n9 : C4-A2 θ 节律, µV\n10 : C4-A2 α 节律, µV\n11 : CZ-A1 δ 节律,µV\n12 : CZ-A1 θ 节律, µV\n13 : CZ-A1 α 节律, µV\n14 : P3-A1 β(LF)节律, µV\n15 : P4-A2 θ 节律, µV\n16 : P4-A2 β(LF)节律, µV\n17 : PZ-A2 β(LF)节律, µV\n18 : O2-A2 δ 节律,µV\n19 : O2-A2 θ 节律, µV\n20 : O2-A2 α 节律, µV\n21 : 
F7-A1 θ 节律, µV\n22 : F7-A1 α 节律, µV\n23 : F7-A1 β(LF)节律, µV\n24 : F8-A2 β(LF)节律, µV\n25 : T4-A2 δ 节律,µV\n26 : T4-A2 θ 节律, µV\n27 : T4-A2 α 节律, µV\n28 : T4-A2 β(LF)节律, µV\n29 : T5-A1 θ 节律, µV\n30 : T6-A2 δ 节律,µV\n31 : T6-A2 α 节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 α 节律, µV\n3 : FP1-A1 β(LF)节律, µV\n4 : FP2-A2 δ 节律,µV\n5 : F3-A1 δ 节律,µV\n6 : F3-A1 θ 节律, µV\n7 : F3-A1 α 节律, µV\n8 : F3-A1 β(LF)节律, µV\n9 : F4-A2 δ 节律,µV\n10 : F4-A2 θ 节律, µV\n11 : F4-A2 α 节律, µV\n12 : F4-A2 β(LF)节律, µV\n13 : FZ-A2 δ 节律,µV\n14 : FZ-A2 θ 节律, µV\n15 : FZ-A2 α 节律, µV\n16 : C3-A1 δ 节律,µV\n17 : C3-A1 α 节律, µV\n18 : C4-A2 β(LF)节律, µV\n19 : CZ-A1 β(LF)节律, µV\n20 : P3-A1 δ 节律,µV\n21 : P3-A1 θ 节律, µV\n22 : P3-A1 α 节律, µV\n23 : P4-A2 δ 节律,µV\n24 : P4-A2 α 节律, µV\n25 : PZ-A2 δ 节律,µV\n26 : PZ-A2 θ 节律, µV\n27 : PZ-A2 α 节律, µV\n28 : O1-A1 δ 节律,µV\n29 : O1-A1 θ 节律, µV\n30 : O1-A1 α 节律, µV\n31 : O1-A1 β(LF)节律, µV\n32 : O2-A2 β(LF)节律, µV\n33 : F7-A1 δ 节律,µV\n34 : F8-A2 δ 节律,µV\n35 : F8-A2 θ 节律, µV\n36 : F8-A2 α 节律, µV\n37 : T3-A1 δ 节律,µV\n38 : T3-A1 θ 节律, µV\n39 : T3-A1 α 节律, µV\n40 : T3-A1 β(LF)节律, µV\n41 : T5-A1 δ 节律,µV\n42 : T5-A1 α 节律, µV\n43 : T5-A1 β(LF)节律, µV\n44 : T6-A2 θ 节律, µV\n45 : T6-A2 β(LF)节律, µV\n"]],[["## 利用树进行特征选择","_____no_output_____"]],[["decisionTree = DecisionTreeClassifier(random_state=1).fit(X_withLabel, y_label)\r\nmodelDecisionTree = SelectFromModel(decisionTree, prefit=True)\r\nX_DecisionTree = modelDecisionTree.transform(X_withLabel)\r\n\r\ndecisionTreeIndexMask = modelDecisionTree.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\r\ndecisionTreeIndexMask = decisionTreeIndexMask.tolist() \r\ndecisionTreeIndexTrue = []\r\ndecisionTreeIndexFalse = []\r\n\r\nfor i in range(len(decisionTreeIndexMask)): # 记录下被筛选的indicator的序号\r\n if (decisionTreeIndexMask[i]==True):\r\n decisionTreeIndexTrue.append(i)\r\n if (decisionTreeIndexMask[i]==False):\r\n decisionTreeIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(decisionTreeIndexTrue)):\r\n print(i+1,\":\",name[decisionTreeIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(decisionTreeIndexFalse)):\r\n print(i+1,\":\",name[decisionTreeIndexFalse[i]])\r\n\r\ndataFrameOfDecisionTreeClassificationFeature = dataFrame\r\nfor i in range(len(decisionTreeIndexFalse)):\r\n dataFrameOfDecisionTreeClassificationFeature = dataFrameOfDecisionTreeClassificationFeature.drop([name[decisionTreeIndexFalse[i]]],axis=1)\r\ndataFrameOfDecisionTreeClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/DecisionTreeFeatureSelectionOfLabel.xlsx')\r\ndataFrameOfDecisionTreeClassificationFeature","被筛选后剩下的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 α 节律, µV\n3 : F3-A1 θ 节律, µV\n4 : C3-A1 θ 节律, µV\n5 : CZ-A1 δ 节律,µV\n6 : CZ-A1 β(LF)节律, µV\n7 : P3-A1 α 节律, µV\n8 : PZ-A2 β(LF)节律, µV\n9 : O2-A2 δ 节律,µV\n10 : O2-A2 β(LF)节律, µV\n11 : F7-A1 θ 节律, µV\n12 : T4-A2 δ 节律,µV\n13 : T5-A1 α 节律, µV\n14 : T6-A2 α 节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 θ 节律, µV\n2 : FP1-A1 β(LF)节律, µV\n3 : FP2-A2 δ 节律,µV\n4 : FP2-A2 θ 节律, µV\n5 : FP2-A2 α 节律, µV\n6 : FP2-A2 β(LF)节律, µV\n7 : F3-A1 δ 节律,µV\n8 : F3-A1 α 节律, µV\n9 : F3-A1 β(LF)节律, µV\n10 : F4-A2 δ 节律,µV\n11 : F4-A2 θ 节律, µV\n12 : F4-A2 α 节律, µV\n13 : F4-A2 β(LF)节律, µV\n14 : FZ-A2 δ 节律,µV\n15 : FZ-A2 θ 节律, µV\n16 : FZ-A2 α 节律, µV\n17 : FZ-A2 β(LF)节律, µV\n18 : C3-A1 δ 节律,µV\n19 : C3-A1 α 节律, µV\n20 : C3-A1 β(LF)节律, µV\n21 : C4-A2 δ 节律,µV\n22 : C4-A2 θ 节律, µV\n23 : C4-A2 α 节律, µV\n24 : C4-A2 β(LF)节律, µV\n25 : CZ-A1 θ 节律, µV\n26 : CZ-A1 α 节律, µV\n27 : P3-A1 δ 节律,µV\n28 : P3-A1 θ 
节律, µV\n29 : P3-A1 β(LF)节律, µV\n30 : P4-A2 δ 节律,µV\n31 : P4-A2 θ 节律, µV\n32 : P4-A2 α 节律, µV\n33 : P4-A2 β(LF)节律, µV\n34 : PZ-A2 δ 节律,µV\n35 : PZ-A2 θ 节律, µV\n36 : PZ-A2 α 节律, µV\n37 : O1-A1 δ 节律,µV\n38 : O1-A1 θ 节律, µV\n39 : O1-A1 α 节律, µV\n40 : O1-A1 β(LF)节律, µV\n41 : O2-A2 θ 节律, µV\n42 : O2-A2 α 节律, µV\n43 : F7-A1 δ 节律,µV\n44 : F7-A1 α 节律, µV\n45 : F7-A1 β(LF)节律, µV\n46 : F8-A2 δ 节律,µV\n47 : F8-A2 θ 节律, µV\n48 : F8-A2 α 节律, µV\n49 : F8-A2 β(LF)节律, µV\n50 : T3-A1 δ 节律,µV\n51 : T3-A1 θ 节律, µV\n52 : T3-A1 α 节律, µV\n53 : T3-A1 β(LF)节律, µV\n54 : T4-A2 θ 节律, µV\n55 : T4-A2 α 节律, µV\n56 : T4-A2 β(LF)节律, µV\n57 : T5-A1 δ 节律,µV\n58 : T5-A1 θ 节律, µV\n59 : T5-A1 β(LF)节律, µV\n60 : T6-A2 δ 节律,µV\n61 : T6-A2 θ 节律, µV\n62 : T6-A2 β(LF)节律, µV\n"]],[["## 利用随机森林进行特征选择","_____no_output_____"]],[["randomForest = RandomForestRegressor().fit(X_withLabel, y_label)\r\nmodelrandomForest = SelectFromModel(randomForest, prefit=True)\r\nX_randomForest = modelrandomForest.transform(X_withLabel)\r\n\r\nrandomForestIndexMask = modelrandomForest.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,randomForestIndexMask].tolist() # 被筛选出来的列的值\r\nrandomForestIndexMask = randomForestIndexMask.tolist() \r\nrandomForestIndexTrue = []\r\nrandomForestIndexFalse = []\r\n\r\nfor i in range(len(randomForestIndexMask)): # 记录下被筛选的indicator的序号\r\n if (randomForestIndexMask[i]==True):\r\n randomForestIndexTrue.append(i)\r\n if (randomForestIndexMask[i]==False):\r\n randomForestIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(randomForestIndexTrue)):\r\n print(i+1,\":\",name[randomForestIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(randomForestIndexFalse)):\r\n print(i+1,\":\",name[randomForestIndexFalse[i]])\r\n\r\ndataFrameOfRandomForestClassificationFeature = dataFrame\r\nfor i in range(len(randomForestIndexFalse)):\r\n dataFrameOfRandomForestClassificationFeature = dataFrameOfRandomForestClassificationFeature.drop([name[randomForestIndexFalse[i]]],axis=1)\r\ndataFrameOfRandomForestClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/RandomForestFeatureSelectionOfLabel.xlsx')\r\ndataFrameOfRandomForestClassificationFeature","被筛选后剩下的特征:\n1 : FP1-A1 θ 节律, µV\n2 : FP2-A2 β(LF)节律, µV\n3 : F4-A2 α 节律, µV\n4 : F4-A2 β(LF)节律, µV\n5 : FZ-A2 β(LF)节律, µV\n6 : C3-A1 β(LF)节律, µV\n7 : C4-A2 δ 节律,µV\n8 : C4-A2 θ 节律, µV\n9 : C4-A2 α 节律, µV\n10 : CZ-A1 α 节律, µV\n11 : P3-A1 δ 节律,µV\n12 : P3-A1 α 节律, µV\n13 : P3-A1 β(LF)节律, µV\n14 : P4-A2 δ 节律,µV\n15 : P4-A2 θ 节律, µV\n16 : P4-A2 α 节律, µV\n17 : PZ-A2 β(LF)节律, µV\n18 : O2-A2 δ 节律,µV\n19 : O2-A2 β(LF)节律, µV\n20 : F7-A1 θ 节律, µV\n21 : F8-A2 α 节律, µV\n22 : F8-A2 β(LF)节律, µV\n23 : T3-A1 θ 节律, µV\n24 : T4-A2 δ 节律,µV\n25 : T4-A2 θ 节律, µV\n26 : T4-A2 α 节律, µV\n27 : T4-A2 β(LF)节律, µV\n28 : T5-A1 δ 节律,µV\n29 : T5-A1 θ 节律, µV\n30 : T5-A1 β(LF)节律, µV\n31 : T6-A2 θ 节律, µV\n32 : T6-A2 α 节律, µV\n33 : T6-A2 β(LF)节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 α 节律, µV\n3 : FP1-A1 β(LF)节律, µV\n4 : FP2-A2 δ 节律,µV\n5 : FP2-A2 θ 节律, µV\n6 : FP2-A2 α 节律, µV\n7 : F3-A1 δ 节律,µV\n8 : F3-A1 θ 节律, µV\n9 : F3-A1 α 节律, µV\n10 : F3-A1 β(LF)节律, µV\n11 : F4-A2 δ 节律,µV\n12 : F4-A2 θ 节律, µV\n13 : FZ-A2 δ 节律,µV\n14 : FZ-A2 θ 节律, µV\n15 : FZ-A2 α 节律, µV\n16 : C3-A1 δ 节律,µV\n17 : C3-A1 θ 节律, µV\n18 : C3-A1 α 节律, µV\n19 : C4-A2 β(LF)节律, µV\n20 : CZ-A1 δ 节律,µV\n21 : CZ-A1 θ 节律, µV\n22 : CZ-A1 β(LF)节律, µV\n23 : P3-A1 θ 节律, µV\n24 : P4-A2 β(LF)节律, µV\n25 : PZ-A2 δ 节律,µV\n26 : PZ-A2 θ 节律, µV\n27 : PZ-A2 α 节律, µV\n28 : O1-A1 δ 节律,µV\n29 : O1-A1 θ 节律, µV\n30 : O1-A1 α 节律, µV\n31 : 
O1-A1 β(LF)节律, µV\n32 : O2-A2 θ 节律, µV\n33 : O2-A2 α 节律, µV\n34 : F7-A1 δ 节律,µV\n35 : F7-A1 α 节律, µV\n36 : F7-A1 β(LF)节律, µV\n37 : F8-A2 δ 节律,µV\n38 : F8-A2 θ 节律, µV\n39 : T3-A1 δ 节律,µV\n40 : T3-A1 α 节律, µV\n41 : T3-A1 β(LF)节律, µV\n42 : T5-A1 α 节律, µV\n43 : T6-A2 δ 节律,µV\n"]],[["## 利用GBDT进行特征选择","_____no_output_____"]],[["GBDTClassifier = GradientBoostingClassifier().fit(X_withLabel, y_label)\r\nmodelGBDTClassifier = SelectFromModel(GBDTClassifier, prefit=True)\r\nX_GBDTClassifier = modelGBDTClassifier.transform(X_withLabel)\r\n\r\nGBDTClassifierIndexMask = modelGBDTClassifier.get_support() # 获取筛选的mask\r\nvalue = X_withLabel[:,GBDTClassifierIndexMask].tolist() # 被筛选出来的列的值\r\nGBDTClassifierIndexMask = GBDTClassifierIndexMask.tolist() \r\nGBDTClassifierIndexTrue = []\r\nGBDTClassifierIndexFalse = []\r\n\r\nfor i in range(len(GBDTClassifierIndexMask)): # 记录下被筛选的indicator的序号\r\n if (GBDTClassifierIndexMask[i]==True):\r\n GBDTClassifierIndexTrue.append(i)\r\n if (GBDTClassifierIndexMask[i]==False):\r\n GBDTClassifierIndexFalse.append(i)\r\nprint(\"被筛选后剩下的特征:\")\r\nfor i in range(len(GBDTClassifierIndexTrue)):\r\n print(i+1,\":\",name[GBDTClassifierIndexTrue[i]])\r\nprint(\"\\n被筛选后去掉的特征:\")\r\nfor i in range(len(GBDTClassifierIndexFalse)):\r\n print(i+1,\":\",name[GBDTClassifierIndexFalse[i]])\r\n\r\ndataFrameOfGBDTClassificationFeature = dataFrame\r\nfor i in range(len(GBDTClassifierIndexFalse)):\r\n dataFrameOfGBDTClassificationFeature = dataFrameOfGBDTClassificationFeature.drop([name[GBDTClassifierIndexFalse[i]]],axis=1)\r\ndataFrameOfGBDTClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/GBDTClassifierFeatureSelectionOfLabel.xlsx')\r\ndataFrameOfGBDTClassificationFeature","被筛选后剩下的特征:\n1 : FP1-A1 α 节律, µV\n2 : FP2-A2 θ 节律, µV\n3 : FP2-A2 β(LF)节律, µV\n4 : C4-A2 θ 节律, µV\n5 : P3-A1 α 节律, µV\n6 : P4-A2 α 节律, µV\n7 : P4-A2 β(LF)节律, µV\n8 : PZ-A2 β(LF)节律, µV\n9 : O2-A2 δ 节律,µV\n10 : F7-A1 δ 节律,µV\n11 : F8-A2 δ 节律,µV\n12 : F8-A2 β(LF)节律, µV\n13 : T3-A1 θ 节律, µV\n14 : T4-A2 δ 节律,µV\n15 : T4-A2 θ 节律, µV\n16 : T5-A1 α 节律, µV\n\n被筛选后去掉的特征:\n1 : FP1-A1 δ 节律,µV\n2 : FP1-A1 θ 节律, µV\n3 : FP1-A1 β(LF)节律, µV\n4 : FP2-A2 δ 节律,µV\n5 : FP2-A2 α 节律, µV\n6 : F3-A1 δ 节律,µV\n7 : F3-A1 θ 节律, µV\n8 : F3-A1 α 节律, µV\n9 : F3-A1 β(LF)节律, µV\n10 : F4-A2 δ 节律,µV\n11 : F4-A2 θ 节律, µV\n12 : F4-A2 α 节律, µV\n13 : F4-A2 β(LF)节律, µV\n14 : FZ-A2 δ 节律,µV\n15 : FZ-A2 θ 节律, µV\n16 : FZ-A2 α 节律, µV\n17 : FZ-A2 β(LF)节律, µV\n18 : C3-A1 δ 节律,µV\n19 : C3-A1 θ 节律, µV\n20 : C3-A1 α 节律, µV\n21 : C3-A1 β(LF)节律, µV\n22 : C4-A2 δ 节律,µV\n23 : C4-A2 α 节律, µV\n24 : C4-A2 β(LF)节律, µV\n25 : CZ-A1 δ 节律,µV\n26 : CZ-A1 θ 节律, µV\n27 : CZ-A1 α 节律, µV\n28 : CZ-A1 β(LF)节律, µV\n29 : P3-A1 δ 节律,µV\n30 : P3-A1 θ 节律, µV\n31 : P3-A1 β(LF)节律, µV\n32 : P4-A2 δ 节律,µV\n33 : P4-A2 θ 节律, µV\n34 : PZ-A2 δ 节律,µV\n35 : PZ-A2 θ 节律, µV\n36 : PZ-A2 α 节律, µV\n37 : O1-A1 δ 节律,µV\n38 : O1-A1 θ 节律, µV\n39 : O1-A1 α 节律, µV\n40 : O1-A1 β(LF)节律, µV\n41 : O2-A2 θ 节律, µV\n42 : O2-A2 α 节律, µV\n43 : O2-A2 β(LF)节律, µV\n44 : F7-A1 θ 节律, µV\n45 : F7-A1 α 节律, µV\n46 : F7-A1 β(LF)节律, µV\n47 : F8-A2 θ 节律, µV\n48 : F8-A2 α 节律, µV\n49 : T3-A1 δ 节律,µV\n50 : T3-A1 α 节律, µV\n51 : T3-A1 β(LF)节律, µV\n52 : T4-A2 α 节律, µV\n53 : T4-A2 β(LF)节律, µV\n54 : T5-A1 δ 节律,µV\n55 : T5-A1 θ 节律, µV\n56 : T5-A1 β(LF)节律, µV\n57 : T6-A2 δ 节律,µV\n58 : T6-A2 θ 节律, µV\n59 : T6-A2 α 节律, µV\n60 : T6-A2 β(LF)节律, µV\n"]],[["# 测试选取的特征","_____no_output_____"],["## 读入PCA和LDA降维后的数据","_____no_output_____"],["## 获取特征选取后的数据","_____no_output_____"]],[["RegressionFeatureSelection = 
[dataFrameOfLassoRegressionFeature,dataFrameOfLSVRegressionFeature,dataFrameOfDecisionTreeRegressionFeature,\r\n dataFrameOfRandomForestRegressionFeature,dataFrameOfGBDTRegressionFeature]\r\n\r\nClassificationFeatureSelection = [dataFrameOfLassoClassificationFeature,dataFrameOfLSVClassificationFeature,dataFrameOfDecisionTreeClassificationFeature,\r\n dataFrameOfRandomForestClassificationFeature,dataFrameOfGBDTClassificationFeature]","_____no_output_____"]],[["## 筛选回归的特征","_____no_output_____"]],[["allMSEResult=[]\r\nallr2Result=[]\r\n\r\nprint(\"LR测试结果\")\r\nfor i in range(len(RegressionFeatureSelection)):\r\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,3]\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=LinearRegression()\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempMSE=[]\r\n tempr2=[]\r\n tempMSE.append(mean_squared_error(test_y,pred_y))\r\n tempr2.append(r2_score(test_y,pred_y))\r\n if(i==len(RegressionFeatureSelection)-1):\r\n allMSEResult.append(min(tempMSE))\r\n allr2Result.append(max(tempr2))\r\n print('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\n print('Coefficient of determination: %.2f'\r\n % r2_score(test_y, pred_y))\r\n\r\nprint(\"\\nSVR测试结果\")\r\nfor i in range(len(RegressionFeatureSelection)):\r\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,3]\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=SVR()\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempMSE=[]\r\n tempr2=[]\r\n tempMSE.append(mean_squared_error(test_y,pred_y))\r\n tempr2.append(r2_score(test_y,pred_y))\r\n if(i==len(RegressionFeatureSelection)-1):\r\n allMSEResult.append(min(tempMSE))\r\n allr2Result.append(max(tempr2))\r\n print('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\n print('Coefficient of determination: %.2f'\r\n % r2_score(test_y, pred_y))\r\n \r\nprint(\"\\n决策树测试结果\")\r\nfor i in range(len(RegressionFeatureSelection)):\r\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,3]\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=DecisionTreeRegressor(random_state=4)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempMSE=[]\r\n tempr2=[]\r\n tempMSE.append(mean_squared_error(test_y,pred_y))\r\n tempr2.append(r2_score(test_y,pred_y))\r\n if(i==len(RegressionFeatureSelection)-1):\r\n allMSEResult.append(min(tempMSE))\r\n allr2Result.append(max(tempr2))\r\n print('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\n print('Coefficient of determination: %.2f'\r\n % r2_score(test_y, pred_y))\r\n\r\nprint(\"\\nGBDT测试结果\")\r\nfor i in range(len(RegressionFeatureSelection)):\r\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,3]\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=GradientBoostingRegressor(random_state=4)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempMSE=[]\r\n tempr2=[]\r\n tempMSE.append(mean_squared_error(test_y,pred_y))\r\n tempr2.append(r2_score(test_y,pred_y))\r\n 
if(i==len(RegressionFeatureSelection)-1):\r\n allMSEResult.append(min(tempMSE))\r\n allr2Result.append(max(tempr2))\r\n print('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\n print('Coefficient of determination: %.2f'\r\n % r2_score(test_y, pred_y))\r\n \r\nprint(\"\\n随机森林测试结果\")\r\nfor i in range(len(RegressionFeatureSelection)):\r\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,3]\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=RandomForestRegressor(random_state=4)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempMSE=[]\r\n tempr2=[]\r\n tempMSE.append(mean_squared_error(test_y,pred_y))\r\n tempr2.append(r2_score(test_y,pred_y))\r\n if(i==len(RegressionFeatureSelection)-1):\r\n allMSEResult.append(min(tempMSE))\r\n allr2Result.append(max(tempr2))\r\n print('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\n print('Coefficient of determination: %.2f'\r\n % r2_score(test_y, pred_y))\r\n \r\nmodelNamelist = ['LR','SVR','决策树','GBDT','随机森林']\r\nfor i in range(5):\r\n if(i==0):\r\n print()\r\n print(modelNamelist[i]+\"测试结果\")\r\n print('Best MSE -',i+1,': %.2f'\r\n % (allMSEResult)[i])\r\n print('Best R2-Score -',i+1,': %.2f\\n'\r\n % (allr2Result)[i])","_____no_output_____"]],[["## 原始特征回归表现","_____no_output_____"]],[["print(\"LR测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,3].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=LinearRegression()\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\nprint('R2-Score: %.2f'\r\n % r2_score(test_y, pred_y))\r\n\r\nprint(\"\\nSVR测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,3].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=SVR()\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\nprint('R2-Score: %.2f'\r\n % r2_score(test_y, pred_y))\r\n\r\nprint(\"\\n决策树测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,3].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=DecisionTreeRegressor(random_state=0)\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\nprint('R2-Score: %.2f'\r\n % r2_score(test_y, pred_y))\r\n\r\nprint(\"\\nGBDT测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,3].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=GradientBoostingRegressor(random_state=0)\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\nprint('R2-Score: %.2f'\r\n % r2_score(test_y, pred_y))\r\n\r\nprint(\"\\n随机森林测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,3].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=RandomForestRegressor(random_state=0)\r\nclf.fit(train_X,train_y)\r\npred_y = 
clf.predict(test_X)\r\nprint('Mean squared error: %.2f'\r\n % mean_squared_error(test_y, pred_y))\r\nprint('R2-Score: %.2f'\r\n % r2_score(test_y, pred_y))","_____no_output_____"]],[["## 筛选分类的特征","_____no_output_____"]],[["allAccuracyResult=[]\r\nallF1Result=[]\r\nprint(\"LR测试结果\")\r\nfor i in range(len(ClassificationFeatureSelection)):\r\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,4].astype(int)\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=LogisticRegression(max_iter=10000)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempAccuracy=[]\r\n tempF1=[]\r\n tempAccuracy.append(accuracy_score(test_y,pred_y))\r\n tempF1.append(f1_score(test_y,pred_y))\r\n if(i==len(ClassificationFeatureSelection)-1):\r\n allAccuracyResult.append(max(tempAccuracy))\r\n allF1Result.append(max(tempF1))\r\n print('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\n print('F1-Score: %.2f\\n'\r\n % f1_score(test_y, pred_y))\r\n\r\nprint(\"\\nSVC测试结果\")\r\nfor i in range(len(ClassificationFeatureSelection)):\r\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,4].astype(int)\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=SVC()\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempAccuracy=[]\r\n tempF1=[]\r\n tempAccuracy.append(accuracy_score(test_y,pred_y))\r\n tempF1.append(f1_score(test_y,pred_y))\r\n if(i==len(ClassificationFeatureSelection)-1):\r\n allAccuracyResult.append(max(tempAccuracy))\r\n allF1Result.append(max(tempF1))\r\n print('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\n print('F1-Score: %.2f\\n'\r\n % f1_score(test_y, pred_y))\r\n \r\nprint(\"\\n决策树测试结果\")\r\nfor i in range(len(ClassificationFeatureSelection)):\r\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,4].astype(int)\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=DecisionTreeClassifier(random_state=0)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempAccuracy=[]\r\n tempF1=[]\r\n tempAccuracy.append(accuracy_score(test_y,pred_y))\r\n tempF1.append(f1_score(test_y,pred_y))\r\n if(i==len(ClassificationFeatureSelection)-1):\r\n allAccuracyResult.append(max(tempAccuracy))\r\n allF1Result.append(max(tempF1))\r\n print('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\n print('F1-Score: %.2f\\n'\r\n % f1_score(test_y, pred_y))\r\n\r\nprint(\"\\nGBDT测试结果\")\r\nfor i in range(len(ClassificationFeatureSelection)):\r\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,4].astype(int)\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=GradientBoostingClassifier(random_state=0)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempAccuracy=[]\r\n tempF1=[]\r\n tempAccuracy.append(accuracy_score(test_y,pred_y))\r\n tempF1.append(f1_score(test_y,pred_y))\r\n if(i==len(ClassificationFeatureSelection)-1):\r\n allAccuracyResult.append(max(tempAccuracy))\r\n allF1Result.append(max(tempF1))\r\n print('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\n print('F1-Score: %.2f\\n'\r\n % f1_score(test_y, 
pred_y))\r\n \r\nprint(\"\\n随机森林测试结果\")\r\nfor i in range(len(ClassificationFeatureSelection)):\r\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\r\n temp_X = tempArray[:,5:]\r\n temp_y = tempArray[:,4].astype(int)\r\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\n clf=RandomForestClassifier(random_state=0)\r\n clf.fit(train_X,train_y)\r\n pred_y = clf.predict(test_X)\r\n if(i==0):\r\n tempAccuracy=[]\r\n tempF1=[]\r\n tempAccuracy.append(accuracy_score(test_y,pred_y))\r\n tempF1.append(f1_score(test_y,pred_y))\r\n if(i==len(ClassificationFeatureSelection)-1):\r\n allAccuracyResult.append(max(tempAccuracy))\r\n allF1Result.append(max(tempF1))\r\n print('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\n print('F1-Score: %.2f\\n'\r\n % f1_score(test_y, pred_y))\r\n\r\nmodelNamelist = ['LR','SVC','决策树','GBDT','随机森林']\r\nfor i in range(5):\r\n if(i==0):\r\n print()\r\n print(modelNamelist[i]+\"测试结果\")\r\n print('Best Accuracy -',i+1,': %.2f'\r\n % (allAccuracyResult)[i])\r\n print('Best F1-Score -',i+1,': %.2f\\n'\r\n % (allF1Result)[i])\r\n","_____no_output_____"]],[["## 原始特征分类表现","_____no_output_____"]],[["print(\"LR测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,4].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=LogisticRegression(max_iter=10000)\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\nprint('F1-Score: %.2f'\r\n % f1_score(test_y, pred_y))\r\n\r\nprint(\"\\nSVC测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,4].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=SVC()\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\nprint('F1-Score: %.2f'\r\n % f1_score(test_y, pred_y))\r\n\r\nprint(\"\\n决策树测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,4].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=DecisionTreeClassifier(random_state=0)\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\nprint('F1-Score: %.2f'\r\n % f1_score(test_y, pred_y))\r\n\r\nprint(\"\\nGBDT测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,4].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=GradientBoostingClassifier(random_state=0)\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\nprint('F1-Score: %.2f'\r\n % f1_score(test_y, pred_y))\r\n\r\nprint(\"\\n随机森林测试结果\")\r\ntempArray = dataArray[:92,:]\r\ntemp_X = tempArray[:,5:]\r\ntemp_y = tempArray[:,4].astype(int)\r\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\r\nclf=RandomForestClassifier(random_state=0)\r\nclf.fit(train_X,train_y)\r\npred_y = clf.predict(test_X)\r\nprint('Accuracy: %.2f'\r\n % accuracy_score(test_y, pred_y))\r\nprint('F1-Score: %.2f'\r\n % f1_score(test_y, pred_y))","_____no_output_____"]]]
numpy as np\\r\\nfrom sklearn.svm import LinearSVR, LinearSVC\\r\\nfrom sklearn.svm import *\\r\\nfrom sklearn.linear_model import Lasso, LogisticRegression, LinearRegression\\r\\nfrom sklearn.tree import DecisionTreeRegressor,DecisionTreeClassifier\\r\\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, GradientBoostingRegressor, GradientBoostingClassifier\\r\\nfrom sklearn.feature_selection import SelectFromModel\\r\\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\\r\\nfrom sklearn.decomposition import PCA,LatentDirichletAllocation\\r\\nfrom sklearn.metrics import *\\r\\nfrom sklearn.model_selection import train_test_split\\r\\nfrom sklearn.pipeline import Pipeline\\r\\nfrom sklearn.preprocessing import StandardScaler\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 读取数据集\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"filePath = './data/138rows_after.xlsx'\\ndataFrame = pd.read_excel(filePath)\\ndataArray = np.array(dataFrame)\\ndataFrame\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 获取标签列\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"name = [column for column in dataFrame]\\r\\nname = name[5:]\\r\\npd.DataFrame(name)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 查看数据规模\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"X_withLabel = dataArray[:92,5:]\\r\\nX_all = dataArray[:,5:] \\r\\ny_data = dataArray[:92,3]\\r\\ny_label= dataArray[:92,4].astype(int)\\r\\nprint(\\\"有标签数据的规模:\\\",X_withLabel.shape)\\r\\nprint(\\\"所有数据的规模:\\\",X_all.shape)\\r\\nprint(\\\"回归标签的规模:\\\",y_data.shape)\\r\\nprint(\\\"分类标签的规模:\\\",y_label.shape)\",\n \"有标签数据的规模: (92, 76)\\n所有数据的规模: (138, 76)\\n回归标签的规模: (92,)\\n分类标签的规模: (92,)\\n\"\n ]\n ],\n [\n [\n \"# 回归\",\n \"_____no_output_____\"\n ],\n [\n \"## 利用Lasso进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"lasso = Lasso(alpha = 0.5,max_iter=5000).fit(X_withLabel, y_data)\\r\\nmodelLasso = SelectFromModel(lasso, prefit=True)\\r\\nX_Lasso = modelLasso.transform(X_withLabel)\\r\\n\\r\\nLassoIndexMask = modelLasso.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\\r\\nLassoIndexMask = LassoIndexMask.tolist() \\r\\nLassoIndexTrue = []\\r\\nLassoIndexFalse = []\\r\\n\\r\\nfor i in range(len(LassoIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (LassoIndexMask[i]==True):\\r\\n LassoIndexTrue.append(i)\\r\\n if (LassoIndexMask[i]==False):\\r\\n LassoIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(LassoIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[LassoIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(LassoIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[LassoIndexFalse[i]])\\r\\n\\r\\ndataFrameOfLassoRegressionFeature = dataFrame\\r\\nfor i in range(len(LassoIndexFalse)):\\r\\n dataFrameOfLassoRegressionFeature = dataFrameOfLassoRegressionFeature.drop([name[LassoIndexFalse[i]]],axis=1)\\r\\ndataFrameOfLassoRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/LassoFeatureSelectionOfData.xlsx')\\r\\ndataFrameOfLassoRegressionFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 θ 节律, µV\\n2 : FP1-A1 α 节律, µV\\n3 : FP2-A2 δ 节律,µV\\n4 : FP2-A2 θ 节律, µV\\n5 : FP2-A2 α 节律, µV\\n6 : FP2-A2 β(LF)节律, µV\\n7 : F3-A1 α 节律, µV\\n8 : F4-A2 α 节律, µV\\n9 : FZ-A2 δ 节律,µV\\n10 : C3-A1 α 节律, µV\\n11 : C4-A2 θ 节律, µV\\n12 : C4-A2 α 节律, µV\\n13 : C4-A2 β(LF)节律, µV\\n14 : CZ-A1 α 节律, µV\\n15 : P3-A1 δ 节律,µV\\n16 : P4-A2 α 节律, µV\\n17 : P4-A2 β(LF)节律, µV\\n18 : PZ-A2 δ 节律,µV\\n19 : PZ-A2 α 节律, µV\\n20 : PZ-A2 β(LF)节律, µV\\n21 : O1-A1 δ 
节律,µV\\n22 : O1-A1 θ 节律, µV\\n23 : O1-A1 α 节律, µV\\n24 : O2-A2 δ 节律,µV\\n25 : O2-A2 θ 节律, µV\\n26 : F7-A1 δ 节律,µV\\n27 : F8-A2 δ 节律,µV\\n28 : T3-A1 θ 节律, µV\\n29 : T3-A1 α 节律, µV\\n30 : T3-A1 β(LF)节律, µV\\n31 : T4-A2 δ 节律,µV\\n32 : T4-A2 α 节律, µV\\n33 : T4-A2 β(LF)节律, µV\\n34 : T5-A1 δ 节律,µV\\n35 : T5-A1 θ 节律, µV\\n36 : T5-A1 α 节律, µV\\n37 : T6-A2 θ 节律, µV\\n38 : T6-A2 α 节律, µV\\n39 : T6-A2 β(LF)节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 β(LF)节律, µV\\n3 : F3-A1 δ 节律,µV\\n4 : F3-A1 θ 节律, µV\\n5 : F3-A1 β(LF)节律, µV\\n6 : F4-A2 δ 节律,µV\\n7 : F4-A2 θ 节律, µV\\n8 : F4-A2 β(LF)节律, µV\\n9 : FZ-A2 θ 节律, µV\\n10 : FZ-A2 α 节律, µV\\n11 : FZ-A2 β(LF)节律, µV\\n12 : C3-A1 δ 节律,µV\\n13 : C3-A1 θ 节律, µV\\n14 : C3-A1 β(LF)节律, µV\\n15 : C4-A2 δ 节律,µV\\n16 : CZ-A1 δ 节律,µV\\n17 : CZ-A1 θ 节律, µV\\n18 : CZ-A1 β(LF)节律, µV\\n19 : P3-A1 θ 节律, µV\\n20 : P3-A1 α 节律, µV\\n21 : P3-A1 β(LF)节律, µV\\n22 : P4-A2 δ 节律,µV\\n23 : P4-A2 θ 节律, µV\\n24 : PZ-A2 θ 节律, µV\\n25 : O1-A1 β(LF)节律, µV\\n26 : O2-A2 α 节律, µV\\n27 : O2-A2 β(LF)节律, µV\\n28 : F7-A1 θ 节律, µV\\n29 : F7-A1 α 节律, µV\\n30 : F7-A1 β(LF)节律, µV\\n31 : F8-A2 θ 节律, µV\\n32 : F8-A2 α 节律, µV\\n33 : F8-A2 β(LF)节律, µV\\n34 : T3-A1 δ 节律,µV\\n35 : T4-A2 θ 节律, µV\\n36 : T5-A1 β(LF)节律, µV\\n37 : T6-A2 δ 节律,µV\\n\"\n ]\n ],\n [\n [\n \"## 利用SVR进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"lsvr = LinearSVR(C=10,max_iter=10000,loss='squared_epsilon_insensitive',dual=False).fit(X_withLabel, y_data)\\r\\nmodelLSVR = SelectFromModel(lsvr, prefit=True)\\r\\nX_LSVR = modelLSVR.transform(X_withLabel)\\r\\n\\r\\nSVRIndexMask = modelLSVR.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,SVRIndexMask].tolist() # 被筛选出来的列的值\\r\\nSVRIndexMask = SVRIndexMask.tolist() \\r\\nSVRIndexTrue = []\\r\\nSVRIndexFalse = []\\r\\n\\r\\nfor i in range(len(SVRIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (SVRIndexMask[i]==True):\\r\\n SVRIndexTrue.append(i)\\r\\n if (SVRIndexMask[i]==False):\\r\\n SVRIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(SVRIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[SVRIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(SVRIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[SVRIndexFalse[i]])\\r\\n\\r\\ndataFrameOfLSVRegressionFeature = dataFrame\\r\\nfor i in range(len(SVRIndexFalse)):\\r\\n dataFrameOfLSVRegressionFeature = dataFrameOfLSVRegressionFeature.drop([name[SVRIndexFalse[i]]],axis=1)\\r\\ndataFrameOfLSVRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/LSVRFeatureSelectionOfLabel.xlsx')\\r\\ndataFrameOfLSVRegressionFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 θ 节律, µV\\n2 : FP1-A1 β(LF)节律, µV\\n3 : FP2-A2 δ 节律,µV\\n4 : FP2-A2 θ 节律, µV\\n5 : FP2-A2 β(LF)节律, µV\\n6 : F3-A1 θ 节律, µV\\n7 : F4-A2 β(LF)节律, µV\\n8 : C3-A1 β(LF)节律, µV\\n9 : CZ-A1 θ 节律, µV\\n10 : CZ-A1 β(LF)节律, µV\\n11 : P3-A1 δ 节律,µV\\n12 : P3-A1 θ 节律, µV\\n13 : P3-A1 α 节律, µV\\n14 : P4-A2 δ 节律,µV\\n15 : P4-A2 θ 节律, µV\\n16 : P4-A2 α 节律, µV\\n17 : P4-A2 β(LF)节律, µV\\n18 : O1-A1 θ 节律, µV\\n19 : O1-A1 β(LF)节律, µV\\n20 : O2-A2 θ 节律, µV\\n21 : O2-A2 β(LF)节律, µV\\n22 : F7-A1 θ 节律, µV\\n23 : F7-A1 β(LF)节律, µV\\n24 : F8-A2 δ 节律,µV\\n25 : F8-A2 α 节律, µV\\n26 : F8-A2 β(LF)节律, µV\\n27 : T4-A2 β(LF)节律, µV\\n28 : T5-A1 β(LF)节律, µV\\n29 : T6-A2 δ 节律,µV\\n30 : T6-A2 θ 节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 α 节律, µV\\n3 : FP2-A2 α 节律, µV\\n4 : F3-A1 δ 节律,µV\\n5 : F3-A1 α 节律, µV\\n6 : F3-A1 β(LF)节律, µV\\n7 : F4-A2 δ 节律,µV\\n8 : F4-A2 θ 节律, µV\\n9 : F4-A2 α 节律, µV\\n10 : FZ-A2 δ 节律,µV\\n11 : FZ-A2 θ 节律, µV\\n12 : FZ-A2 α 节律, 
µV\\n13 : FZ-A2 β(LF)节律, µV\\n14 : C3-A1 δ 节律,µV\\n15 : C3-A1 θ 节律, µV\\n16 : C3-A1 α 节律, µV\\n17 : C4-A2 δ 节律,µV\\n18 : C4-A2 θ 节律, µV\\n19 : C4-A2 α 节律, µV\\n20 : C4-A2 β(LF)节律, µV\\n21 : CZ-A1 δ 节律,µV\\n22 : CZ-A1 α 节律, µV\\n23 : P3-A1 β(LF)节律, µV\\n24 : PZ-A2 δ 节律,µV\\n25 : PZ-A2 θ 节律, µV\\n26 : PZ-A2 α 节律, µV\\n27 : PZ-A2 β(LF)节律, µV\\n28 : O1-A1 δ 节律,µV\\n29 : O1-A1 α 节律, µV\\n30 : O2-A2 δ 节律,µV\\n31 : O2-A2 α 节律, µV\\n32 : F7-A1 δ 节律,µV\\n33 : F7-A1 α 节律, µV\\n34 : F8-A2 θ 节律, µV\\n35 : T3-A1 δ 节律,µV\\n36 : T3-A1 θ 节律, µV\\n37 : T3-A1 α 节律, µV\\n38 : T3-A1 β(LF)节律, µV\\n39 : T4-A2 δ 节律,µV\\n40 : T4-A2 θ 节律, µV\\n41 : T4-A2 α 节律, µV\\n42 : T5-A1 δ 节律,µV\\n43 : T5-A1 θ 节律, µV\\n44 : T5-A1 α 节律, µV\\n45 : T6-A2 α 节律, µV\\n46 : T6-A2 β(LF)节律, µV\\n\"\n ]\n ],\n [\n [\n \"## 利用树进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"decisionTree = DecisionTreeRegressor(min_samples_leaf=1,random_state=1).fit(X_withLabel, y_data)\\r\\nmodelDecisionTree = SelectFromModel(decisionTree, prefit=True)\\r\\nX_DecisionTree = modelDecisionTree.transform(X_withLabel)\\r\\n\\r\\ndecisionTreeIndexMask = modelDecisionTree.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\\r\\ndecisionTreeIndexMask = decisionTreeIndexMask.tolist() \\r\\ndecisionTreeIndexTrue = []\\r\\ndecisionTreeIndexFalse = []\\r\\n\\r\\nfor i in range(len(decisionTreeIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (decisionTreeIndexMask[i]==True):\\r\\n decisionTreeIndexTrue.append(i)\\r\\n if (decisionTreeIndexMask[i]==False):\\r\\n decisionTreeIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(decisionTreeIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[decisionTreeIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(decisionTreeIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[decisionTreeIndexFalse[i]])\\r\\n\\r\\ndataFrameOfDecisionTreeRegressionFeature = dataFrame\\r\\nfor i in range(len(decisionTreeIndexFalse)):\\r\\n dataFrameOfDecisionTreeRegressionFeature = dataFrameOfDecisionTreeRegressionFeature.drop([name[decisionTreeIndexFalse[i]]],axis=1)\\r\\ndataFrameOfDecisionTreeRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/DecisionTreeFeatureSelectionOfData.xlsx')\\r\\ndataFrameOfDecisionTreeRegressionFeature\",\n \"被筛选后剩下的特征:\\n1 : F4-A2 θ 节律, µV\\n2 : F4-A2 α 节律, µV\\n3 : FZ-A2 θ 节律, µV\\n4 : FZ-A2 β(LF)节律, µV\\n5 : C3-A1 θ 节律, µV\\n6 : C3-A1 β(LF)节律, µV\\n7 : CZ-A1 β(LF)节律, µV\\n8 : P3-A1 δ 节律,µV\\n9 : P3-A1 β(LF)节律, µV\\n10 : PZ-A2 α 节律, µV\\n11 : O2-A2 δ 节律,µV\\n12 : O2-A2 α 节律, µV\\n13 : F8-A2 δ 节律,µV\\n14 : T3-A1 θ 节律, µV\\n15 : T5-A1 β(LF)节律, µV\\n16 : T6-A2 α 节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 θ 节律, µV\\n3 : FP1-A1 α 节律, µV\\n4 : FP1-A1 β(LF)节律, µV\\n5 : FP2-A2 δ 节律,µV\\n6 : FP2-A2 θ 节律, µV\\n7 : FP2-A2 α 节律, µV\\n8 : FP2-A2 β(LF)节律, µV\\n9 : F3-A1 δ 节律,µV\\n10 : F3-A1 θ 节律, µV\\n11 : F3-A1 α 节律, µV\\n12 : F3-A1 β(LF)节律, µV\\n13 : F4-A2 δ 节律,µV\\n14 : F4-A2 β(LF)节律, µV\\n15 : FZ-A2 δ 节律,µV\\n16 : FZ-A2 α 节律, µV\\n17 : C3-A1 δ 节律,µV\\n18 : C3-A1 α 节律, µV\\n19 : C4-A2 δ 节律,µV\\n20 : C4-A2 θ 节律, µV\\n21 : C4-A2 α 节律, µV\\n22 : C4-A2 β(LF)节律, µV\\n23 : CZ-A1 δ 节律,µV\\n24 : CZ-A1 θ 节律, µV\\n25 : CZ-A1 α 节律, µV\\n26 : P3-A1 θ 节律, µV\\n27 : P3-A1 α 节律, µV\\n28 : P4-A2 δ 节律,µV\\n29 : P4-A2 θ 节律, µV\\n30 : P4-A2 α 节律, µV\\n31 : P4-A2 β(LF)节律, µV\\n32 : PZ-A2 δ 节律,µV\\n33 : PZ-A2 θ 节律, µV\\n34 : PZ-A2 β(LF)节律, µV\\n35 : O1-A1 δ 节律,µV\\n36 : O1-A1 θ 节律, µV\\n37 : O1-A1 α 节律, µV\\n38 : O1-A1 β(LF)节律, µV\\n39 : O2-A2 
θ 节律, µV\\n40 : O2-A2 β(LF)节律, µV\\n41 : F7-A1 δ 节律,µV\\n42 : F7-A1 θ 节律, µV\\n43 : F7-A1 α 节律, µV\\n44 : F7-A1 β(LF)节律, µV\\n45 : F8-A2 θ 节律, µV\\n46 : F8-A2 α 节律, µV\\n47 : F8-A2 β(LF)节律, µV\\n48 : T3-A1 δ 节律,µV\\n49 : T3-A1 α 节律, µV\\n50 : T3-A1 β(LF)节律, µV\\n51 : T4-A2 δ 节律,µV\\n52 : T4-A2 θ 节律, µV\\n53 : T4-A2 α 节律, µV\\n54 : T4-A2 β(LF)节律, µV\\n55 : T5-A1 δ 节律,µV\\n56 : T5-A1 θ 节律, µV\\n57 : T5-A1 α 节律, µV\\n58 : T6-A2 δ 节律,µV\\n59 : T6-A2 θ 节律, µV\\n60 : T6-A2 β(LF)节律, µV\\n\"\n ]\n ],\n [\n [\n \"## 利用随机森林进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"randomForest = RandomForestRegressor().fit(X_withLabel, y_data)\\r\\nmodelrandomForest = SelectFromModel(randomForest, prefit=True)\\r\\nX_randomForest = modelrandomForest.transform(X_withLabel)\\r\\n\\r\\nrandomForestIndexMask = modelrandomForest.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,randomForestIndexMask].tolist() # 被筛选出来的列的值\\r\\nrandomForestIndexMask = randomForestIndexMask.tolist() \\r\\nrandomForestIndexTrue = []\\r\\nrandomForestIndexFalse = []\\r\\n\\r\\nfor i in range(len(randomForestIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (randomForestIndexMask[i]==True):\\r\\n randomForestIndexTrue.append(i)\\r\\n if (randomForestIndexMask[i]==False):\\r\\n randomForestIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(randomForestIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[randomForestIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(randomForestIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[randomForestIndexFalse[i]])\\r\\n\\r\\ndataFrameOfRandomForestRegressionFeature = dataFrame\\r\\nfor i in range(len(randomForestIndexFalse)):\\r\\n dataFrameOfRandomForestRegressionFeature = dataFrameOfRandomForestRegressionFeature.drop([name[randomForestIndexFalse[i]]],axis=1)\\r\\ndataFrameOfRandomForestRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/RandomForestFeatureSelectionOfData.xlsx')\\r\\ndataFrameOfRandomForestRegressionFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 θ 节律, µV\\n2 : FP1-A1 α 节律, µV\\n3 : FP2-A2 θ 节律, µV\\n4 : FP2-A2 β(LF)节律, µV\\n5 : F3-A1 θ 节律, µV\\n6 : F4-A2 θ 节律, µV\\n7 : C3-A1 θ 节律, µV\\n8 : C4-A2 δ 节律,µV\\n9 : C4-A2 θ 节律, µV\\n10 : P3-A1 δ 节律,µV\\n11 : P4-A2 θ 节律, µV\\n12 : PZ-A2 β(LF)节律, µV\\n13 : O1-A1 θ 节律, µV\\n14 : O2-A2 δ 节律,µV\\n15 : O2-A2 θ 节律, µV\\n16 : O2-A2 β(LF)节律, µV\\n17 : F7-A1 θ 节律, µV\\n18 : F8-A2 δ 节律,µV\\n19 : F8-A2 θ 节律, µV\\n20 : F8-A2 α 节律, µV\\n21 : T3-A1 θ 节律, µV\\n22 : T3-A1 β(LF)节律, µV\\n23 : T4-A2 δ 节律,µV\\n24 : T4-A2 θ 节律, µV\\n25 : T4-A2 β(LF)节律, µV\\n26 : T5-A1 θ 节律, µV\\n27 : T5-A1 β(LF)节律, µV\\n28 : T6-A2 θ 节律, µV\\n29 : T6-A2 β(LF)节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 β(LF)节律, µV\\n3 : FP2-A2 δ 节律,µV\\n4 : FP2-A2 α 节律, µV\\n5 : F3-A1 δ 节律,µV\\n6 : F3-A1 α 节律, µV\\n7 : F3-A1 β(LF)节律, µV\\n8 : F4-A2 δ 节律,µV\\n9 : F4-A2 α 节律, µV\\n10 : F4-A2 β(LF)节律, µV\\n11 : FZ-A2 δ 节律,µV\\n12 : FZ-A2 θ 节律, µV\\n13 : FZ-A2 α 节律, µV\\n14 : FZ-A2 β(LF)节律, µV\\n15 : C3-A1 δ 节律,µV\\n16 : C3-A1 α 节律, µV\\n17 : C3-A1 β(LF)节律, µV\\n18 : C4-A2 α 节律, µV\\n19 : C4-A2 β(LF)节律, µV\\n20 : CZ-A1 δ 节律,µV\\n21 : CZ-A1 θ 节律, µV\\n22 : CZ-A1 α 节律, µV\\n23 : CZ-A1 β(LF)节律, µV\\n24 : P3-A1 θ 节律, µV\\n25 : P3-A1 α 节律, µV\\n26 : P3-A1 β(LF)节律, µV\\n27 : P4-A2 δ 节律,µV\\n28 : P4-A2 α 节律, µV\\n29 : P4-A2 β(LF)节律, µV\\n30 : PZ-A2 δ 节律,µV\\n31 : PZ-A2 θ 节律, µV\\n32 : PZ-A2 α 节律, µV\\n33 : O1-A1 δ 节律,µV\\n34 : O1-A1 α 节律, µV\\n35 : O1-A1 β(LF)节律, µV\\n36 : O2-A2 α 节律, µV\\n37 : F7-A1 δ 节律,µV\\n38 : F7-A1 α 节律, µV\\n39 : F7-A1 
β(LF)节律, µV\\n40 : F8-A2 β(LF)节律, µV\\n41 : T3-A1 δ 节律,µV\\n42 : T3-A1 α 节律, µV\\n43 : T4-A2 α 节律, µV\\n44 : T5-A1 δ 节律,µV\\n45 : T5-A1 α 节律, µV\\n46 : T6-A2 δ 节律,µV\\n47 : T6-A2 α 节律, µV\\n\"\n ]\n ],\n [\n [\n \"## 利用GBDT进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"GBDTRegressor = GradientBoostingRegressor().fit(X_withLabel, y_data)\\r\\nmodelGBDTRegressor = SelectFromModel(GBDTRegressor, prefit=True)\\r\\nX_GBDTRegressor = modelGBDTRegressor.transform(X_withLabel)\\r\\n\\r\\nGBDTRegressorIndexMask = modelGBDTRegressor.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,GBDTRegressorIndexMask].tolist() # 被筛选出来的列的值\\r\\nGBDTRegressorIndexMask = GBDTRegressorIndexMask.tolist() \\r\\nGBDTRegressorIndexTrue = []\\r\\nGBDTRegressorIndexFalse = []\\r\\n\\r\\nfor i in range(len(GBDTRegressorIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (GBDTRegressorIndexMask[i]==True):\\r\\n GBDTRegressorIndexTrue.append(i)\\r\\n if (GBDTRegressorIndexMask[i]==False):\\r\\n GBDTRegressorIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(GBDTRegressorIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[GBDTRegressorIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(GBDTRegressorIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[GBDTRegressorIndexFalse[i]])\\r\\n\\r\\ndataFrameOfGBDTRegressionFeature = dataFrame\\r\\nfor i in range(len(GBDTRegressorIndexFalse)):\\r\\n dataFrameOfGBDTRegressionFeature = dataFrameOfGBDTRegressionFeature.drop([name[GBDTRegressorIndexFalse[i]]],axis=1)\\r\\ndataFrameOfGBDTRegressionFeature.to_excel('/content/drive/MyDrive/DataMining/final/GBDTRegressorFeatureSelectionOfData.xlsx')\\r\\ndataFrameOfGBDTRegressionFeature\",\n \"被筛选后剩下的特征:\\n1 : FP2-A2 θ 节律, µV\\n2 : FP2-A2 β(LF)节律, µV\\n3 : F3-A1 θ 节律, µV\\n4 : C3-A1 δ 节律,µV\\n5 : C3-A1 θ 节律, µV\\n6 : C4-A2 δ 节律,µV\\n7 : C4-A2 θ 节律, µV\\n8 : CZ-A1 θ 节律, µV\\n9 : P3-A1 δ 节律,µV\\n10 : P3-A1 α 节律, µV\\n11 : P4-A2 θ 节律, µV\\n12 : P4-A2 α 节律, µV\\n13 : PZ-A2 α 节律, µV\\n14 : PZ-A2 β(LF)节律, µV\\n15 : O1-A1 θ 节律, µV\\n16 : O2-A2 δ 节律,µV\\n17 : O2-A2 θ 节律, µV\\n18 : O2-A2 β(LF)节律, µV\\n19 : F8-A2 δ 节律,µV\\n20 : F8-A2 α 节律, µV\\n21 : F8-A2 β(LF)节律, µV\\n22 : T3-A1 θ 节律, µV\\n23 : T4-A2 δ 节律,µV\\n24 : T4-A2 θ 节律, µV\\n25 : T4-A2 β(LF)节律, µV\\n26 : T6-A2 θ 节律, µV\\n27 : T6-A2 β(LF)节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 θ 节律, µV\\n3 : FP1-A1 α 节律, µV\\n4 : FP1-A1 β(LF)节律, µV\\n5 : FP2-A2 δ 节律,µV\\n6 : FP2-A2 α 节律, µV\\n7 : F3-A1 δ 节律,µV\\n8 : F3-A1 α 节律, µV\\n9 : F3-A1 β(LF)节律, µV\\n10 : F4-A2 δ 节律,µV\\n11 : F4-A2 θ 节律, µV\\n12 : F4-A2 α 节律, µV\\n13 : F4-A2 β(LF)节律, µV\\n14 : FZ-A2 δ 节律,µV\\n15 : FZ-A2 θ 节律, µV\\n16 : FZ-A2 α 节律, µV\\n17 : FZ-A2 β(LF)节律, µV\\n18 : C3-A1 α 节律, µV\\n19 : C3-A1 β(LF)节律, µV\\n20 : C4-A2 α 节律, µV\\n21 : C4-A2 β(LF)节律, µV\\n22 : CZ-A1 δ 节律,µV\\n23 : CZ-A1 α 节律, µV\\n24 : CZ-A1 β(LF)节律, µV\\n25 : P3-A1 θ 节律, µV\\n26 : P3-A1 β(LF)节律, µV\\n27 : P4-A2 δ 节律,µV\\n28 : P4-A2 β(LF)节律, µV\\n29 : PZ-A2 δ 节律,µV\\n30 : PZ-A2 θ 节律, µV\\n31 : O1-A1 δ 节律,µV\\n32 : O1-A1 α 节律, µV\\n33 : O1-A1 β(LF)节律, µV\\n34 : O2-A2 α 节律, µV\\n35 : F7-A1 δ 节律,µV\\n36 : F7-A1 θ 节律, µV\\n37 : F7-A1 α 节律, µV\\n38 : F7-A1 β(LF)节律, µV\\n39 : F8-A2 θ 节律, µV\\n40 : T3-A1 δ 节律,µV\\n41 : T3-A1 α 节律, µV\\n42 : T3-A1 β(LF)节律, µV\\n43 : T4-A2 α 节律, µV\\n44 : T5-A1 δ 节律,µV\\n45 : T5-A1 θ 节律, µV\\n46 : T5-A1 α 节律, µV\\n47 : T5-A1 β(LF)节律, µV\\n48 : T6-A2 δ 节律,µV\\n49 : T6-A2 α 节律, µV\\n\"\n ]\n ],\n [\n [\n \"# 分类\",\n \"_____no_output_____\"\n ],\n [\n \"## 利用Lasso进行特征选择\",\n \"_____no_output_____\"\n 
]\n ],\n [\n [\n \"lasso = Lasso(alpha = 0.3,max_iter=5000).fit(X_withLabel, y_label)\\r\\nmodelLasso = SelectFromModel(lasso, prefit=True)\\r\\nX_Lasso = modelLasso.transform(X_withLabel)\\r\\n\\r\\nLassoIndexMask = modelLasso.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\\r\\nLassoIndexMask = LassoIndexMask.tolist() \\r\\nLassoIndexTrue = []\\r\\nLassoIndexFalse = []\\r\\n\\r\\nfor i in range(len(LassoIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (LassoIndexMask[i]==True):\\r\\n LassoIndexTrue.append(i)\\r\\n if (LassoIndexMask[i]==False):\\r\\n LassoIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(LassoIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[LassoIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(LassoIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[LassoIndexFalse[i]])\\r\\n\\r\\ndataFrameOfLassoClassificationFeature = dataFrame\\r\\nfor i in range(len(LassoIndexFalse)):\\r\\n dataFrameOfLassoClassificationFeature = dataFrameOfLassoClassificationFeature.drop([name[LassoIndexFalse[i]]],axis=1)\\r\\ndataFrameOfLassoClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/LassoFeatureSelectionOfLabel.xlsx')\\r\\ndataFrameOfLassoClassificationFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 α 节律, µV\\n2 : FZ-A2 δ 节律,µV\\n3 : C4-A2 δ 节律,µV\\n4 : CZ-A1 α 节律, µV\\n5 : P3-A1 δ 节律,µV\\n6 : P4-A2 α 节律, µV\\n7 : PZ-A2 δ 节律,µV\\n8 : O2-A2 δ 节律,µV\\n9 : F7-A1 δ 节律,µV\\n10 : F7-A1 α 节律, µV\\n11 : T3-A1 α 节律, µV\\n12 : T4-A2 δ 节律,µV\\n13 : T4-A2 α 节律, µV\\n14 : T5-A1 δ 节律,µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 θ 节律, µV\\n3 : FP1-A1 β(LF)节律, µV\\n4 : FP2-A2 δ 节律,µV\\n5 : FP2-A2 θ 节律, µV\\n6 : FP2-A2 α 节律, µV\\n7 : FP2-A2 β(LF)节律, µV\\n8 : F3-A1 δ 节律,µV\\n9 : F3-A1 θ 节律, µV\\n10 : F3-A1 α 节律, µV\\n11 : F3-A1 β(LF)节律, µV\\n12 : F4-A2 δ 节律,µV\\n13 : F4-A2 θ 节律, µV\\n14 : F4-A2 α 节律, µV\\n15 : F4-A2 β(LF)节律, µV\\n16 : FZ-A2 θ 节律, µV\\n17 : FZ-A2 α 节律, µV\\n18 : FZ-A2 β(LF)节律, µV\\n19 : C3-A1 δ 节律,µV\\n20 : C3-A1 θ 节律, µV\\n21 : C3-A1 α 节律, µV\\n22 : C3-A1 β(LF)节律, µV\\n23 : C4-A2 θ 节律, µV\\n24 : C4-A2 α 节律, µV\\n25 : C4-A2 β(LF)节律, µV\\n26 : CZ-A1 δ 节律,µV\\n27 : CZ-A1 θ 节律, µV\\n28 : CZ-A1 β(LF)节律, µV\\n29 : P3-A1 θ 节律, µV\\n30 : P3-A1 α 节律, µV\\n31 : P3-A1 β(LF)节律, µV\\n32 : P4-A2 δ 节律,µV\\n33 : P4-A2 θ 节律, µV\\n34 : P4-A2 β(LF)节律, µV\\n35 : PZ-A2 θ 节律, µV\\n36 : PZ-A2 α 节律, µV\\n37 : PZ-A2 β(LF)节律, µV\\n38 : O1-A1 δ 节律,µV\\n39 : O1-A1 θ 节律, µV\\n40 : O1-A1 α 节律, µV\\n41 : O1-A1 β(LF)节律, µV\\n42 : O2-A2 θ 节律, µV\\n43 : O2-A2 α 节律, µV\\n44 : O2-A2 β(LF)节律, µV\\n45 : F7-A1 θ 节律, µV\\n46 : F7-A1 β(LF)节律, µV\\n47 : F8-A2 δ 节律,µV\\n48 : F8-A2 θ 节律, µV\\n49 : F8-A2 α 节律, µV\\n50 : F8-A2 β(LF)节律, µV\\n51 : T3-A1 δ 节律,µV\\n52 : T3-A1 θ 节律, µV\\n53 : T3-A1 β(LF)节律, µV\\n54 : T4-A2 θ 节律, µV\\n55 : T4-A2 β(LF)节律, µV\\n56 : T5-A1 θ 节律, µV\\n57 : T5-A1 α 节律, µV\\n58 : T5-A1 β(LF)节律, µV\\n59 : T6-A2 δ 节律,µV\\n60 : T6-A2 θ 节律, µV\\n61 : T6-A2 α 节律, µV\\n62 : T6-A2 β(LF)节律, µV\\n\"\n ]\n ],\n [\n [\n \"## 利用SVC进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"lsvc = LinearSVC(C=10,max_iter=10000,dual=False).fit(X_withLabel, y_label.ravel())\\r\\nmodelLSVC = SelectFromModel(lsvc, prefit=True)\\r\\nX_LSVR = modelLSVR.transform(X_withLabel)\\r\\n\\r\\nSVCIndexMask = modelLSVC.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,SVCIndexMask].tolist() # 被筛选出来的列的值\\r\\nSVCIndexMask = SVCIndexMask.tolist() \\r\\nSVCIndexTrue = []\\r\\nSVCIndexFalse = []\\r\\n\\r\\nfor i in range(len(SVCIndexMask)): # 
记录下被筛选的indicator的序号\\r\\n if (SVCIndexMask[i]==True):\\r\\n SVCIndexTrue.append(i)\\r\\n if (SVCIndexMask[i]==False):\\r\\n SVCIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(SVCIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[SVCIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(SVCIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[SVCIndexFalse[i]])\\r\\n\\r\\ndataFrameOfLSVClassificationFeature = dataFrame\\r\\nfor i in range(len(SVCIndexFalse)):\\r\\n dataFrameOfLSVClassificationFeature = dataFrameOfLSVClassificationFeature.drop([name[SVCIndexFalse[i]]],axis=1)\\r\\ndataFrameOfLSVClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/LSVCFeatureSelectionOfLabel.xlsx')\\r\\ndataFrameOfLSVClassificationFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 θ 节律, µV\\n2 : FP2-A2 θ 节律, µV\\n3 : FP2-A2 α 节律, µV\\n4 : FP2-A2 β(LF)节律, µV\\n5 : FZ-A2 β(LF)节律, µV\\n6 : C3-A1 θ 节律, µV\\n7 : C3-A1 β(LF)节律, µV\\n8 : C4-A2 δ 节律,µV\\n9 : C4-A2 θ 节律, µV\\n10 : C4-A2 α 节律, µV\\n11 : CZ-A1 δ 节律,µV\\n12 : CZ-A1 θ 节律, µV\\n13 : CZ-A1 α 节律, µV\\n14 : P3-A1 β(LF)节律, µV\\n15 : P4-A2 θ 节律, µV\\n16 : P4-A2 β(LF)节律, µV\\n17 : PZ-A2 β(LF)节律, µV\\n18 : O2-A2 δ 节律,µV\\n19 : O2-A2 θ 节律, µV\\n20 : O2-A2 α 节律, µV\\n21 : F7-A1 θ 节律, µV\\n22 : F7-A1 α 节律, µV\\n23 : F7-A1 β(LF)节律, µV\\n24 : F8-A2 β(LF)节律, µV\\n25 : T4-A2 δ 节律,µV\\n26 : T4-A2 θ 节律, µV\\n27 : T4-A2 α 节律, µV\\n28 : T4-A2 β(LF)节律, µV\\n29 : T5-A1 θ 节律, µV\\n30 : T6-A2 δ 节律,µV\\n31 : T6-A2 α 节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 α 节律, µV\\n3 : FP1-A1 β(LF)节律, µV\\n4 : FP2-A2 δ 节律,µV\\n5 : F3-A1 δ 节律,µV\\n6 : F3-A1 θ 节律, µV\\n7 : F3-A1 α 节律, µV\\n8 : F3-A1 β(LF)节律, µV\\n9 : F4-A2 δ 节律,µV\\n10 : F4-A2 θ 节律, µV\\n11 : F4-A2 α 节律, µV\\n12 : F4-A2 β(LF)节律, µV\\n13 : FZ-A2 δ 节律,µV\\n14 : FZ-A2 θ 节律, µV\\n15 : FZ-A2 α 节律, µV\\n16 : C3-A1 δ 节律,µV\\n17 : C3-A1 α 节律, µV\\n18 : C4-A2 β(LF)节律, µV\\n19 : CZ-A1 β(LF)节律, µV\\n20 : P3-A1 δ 节律,µV\\n21 : P3-A1 θ 节律, µV\\n22 : P3-A1 α 节律, µV\\n23 : P4-A2 δ 节律,µV\\n24 : P4-A2 α 节律, µV\\n25 : PZ-A2 δ 节律,µV\\n26 : PZ-A2 θ 节律, µV\\n27 : PZ-A2 α 节律, µV\\n28 : O1-A1 δ 节律,µV\\n29 : O1-A1 θ 节律, µV\\n30 : O1-A1 α 节律, µV\\n31 : O1-A1 β(LF)节律, µV\\n32 : O2-A2 β(LF)节律, µV\\n33 : F7-A1 δ 节律,µV\\n34 : F8-A2 δ 节律,µV\\n35 : F8-A2 θ 节律, µV\\n36 : F8-A2 α 节律, µV\\n37 : T3-A1 δ 节律,µV\\n38 : T3-A1 θ 节律, µV\\n39 : T3-A1 α 节律, µV\\n40 : T3-A1 β(LF)节律, µV\\n41 : T5-A1 δ 节律,µV\\n42 : T5-A1 α 节律, µV\\n43 : T5-A1 β(LF)节律, µV\\n44 : T6-A2 θ 节律, µV\\n45 : T6-A2 β(LF)节律, µV\\n\"\n ]\n ],\n [\n [\n \"## 利用树进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"decisionTree = DecisionTreeClassifier(random_state=1).fit(X_withLabel, y_label)\\r\\nmodelDecisionTree = SelectFromModel(decisionTree, prefit=True)\\r\\nX_DecisionTree = modelDecisionTree.transform(X_withLabel)\\r\\n\\r\\ndecisionTreeIndexMask = modelDecisionTree.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,LassoIndexMask].tolist() # 被筛选出来的列的值\\r\\ndecisionTreeIndexMask = decisionTreeIndexMask.tolist() \\r\\ndecisionTreeIndexTrue = []\\r\\ndecisionTreeIndexFalse = []\\r\\n\\r\\nfor i in range(len(decisionTreeIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (decisionTreeIndexMask[i]==True):\\r\\n decisionTreeIndexTrue.append(i)\\r\\n if (decisionTreeIndexMask[i]==False):\\r\\n decisionTreeIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(decisionTreeIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[decisionTreeIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(decisionTreeIndexFalse)):\\r\\n 
print(i+1,\\\":\\\",name[decisionTreeIndexFalse[i]])\\r\\n\\r\\ndataFrameOfDecisionTreeClassificationFeature = dataFrame\\r\\nfor i in range(len(decisionTreeIndexFalse)):\\r\\n dataFrameOfDecisionTreeClassificationFeature = dataFrameOfDecisionTreeClassificationFeature.drop([name[decisionTreeIndexFalse[i]]],axis=1)\\r\\ndataFrameOfDecisionTreeClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/DecisionTreeFeatureSelectionOfLabel.xlsx')\\r\\ndataFrameOfDecisionTreeClassificationFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 α 节律, µV\\n3 : F3-A1 θ 节律, µV\\n4 : C3-A1 θ 节律, µV\\n5 : CZ-A1 δ 节律,µV\\n6 : CZ-A1 β(LF)节律, µV\\n7 : P3-A1 α 节律, µV\\n8 : PZ-A2 β(LF)节律, µV\\n9 : O2-A2 δ 节律,µV\\n10 : O2-A2 β(LF)节律, µV\\n11 : F7-A1 θ 节律, µV\\n12 : T4-A2 δ 节律,µV\\n13 : T5-A1 α 节律, µV\\n14 : T6-A2 α 节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 θ 节律, µV\\n2 : FP1-A1 β(LF)节律, µV\\n3 : FP2-A2 δ 节律,µV\\n4 : FP2-A2 θ 节律, µV\\n5 : FP2-A2 α 节律, µV\\n6 : FP2-A2 β(LF)节律, µV\\n7 : F3-A1 δ 节律,µV\\n8 : F3-A1 α 节律, µV\\n9 : F3-A1 β(LF)节律, µV\\n10 : F4-A2 δ 节律,µV\\n11 : F4-A2 θ 节律, µV\\n12 : F4-A2 α 节律, µV\\n13 : F4-A2 β(LF)节律, µV\\n14 : FZ-A2 δ 节律,µV\\n15 : FZ-A2 θ 节律, µV\\n16 : FZ-A2 α 节律, µV\\n17 : FZ-A2 β(LF)节律, µV\\n18 : C3-A1 δ 节律,µV\\n19 : C3-A1 α 节律, µV\\n20 : C3-A1 β(LF)节律, µV\\n21 : C4-A2 δ 节律,µV\\n22 : C4-A2 θ 节律, µV\\n23 : C4-A2 α 节律, µV\\n24 : C4-A2 β(LF)节律, µV\\n25 : CZ-A1 θ 节律, µV\\n26 : CZ-A1 α 节律, µV\\n27 : P3-A1 δ 节律,µV\\n28 : P3-A1 θ 节律, µV\\n29 : P3-A1 β(LF)节律, µV\\n30 : P4-A2 δ 节律,µV\\n31 : P4-A2 θ 节律, µV\\n32 : P4-A2 α 节律, µV\\n33 : P4-A2 β(LF)节律, µV\\n34 : PZ-A2 δ 节律,µV\\n35 : PZ-A2 θ 节律, µV\\n36 : PZ-A2 α 节律, µV\\n37 : O1-A1 δ 节律,µV\\n38 : O1-A1 θ 节律, µV\\n39 : O1-A1 α 节律, µV\\n40 : O1-A1 β(LF)节律, µV\\n41 : O2-A2 θ 节律, µV\\n42 : O2-A2 α 节律, µV\\n43 : F7-A1 δ 节律,µV\\n44 : F7-A1 α 节律, µV\\n45 : F7-A1 β(LF)节律, µV\\n46 : F8-A2 δ 节律,µV\\n47 : F8-A2 θ 节律, µV\\n48 : F8-A2 α 节律, µV\\n49 : F8-A2 β(LF)节律, µV\\n50 : T3-A1 δ 节律,µV\\n51 : T3-A1 θ 节律, µV\\n52 : T3-A1 α 节律, µV\\n53 : T3-A1 β(LF)节律, µV\\n54 : T4-A2 θ 节律, µV\\n55 : T4-A2 α 节律, µV\\n56 : T4-A2 β(LF)节律, µV\\n57 : T5-A1 δ 节律,µV\\n58 : T5-A1 θ 节律, µV\\n59 : T5-A1 β(LF)节律, µV\\n60 : T6-A2 δ 节律,µV\\n61 : T6-A2 θ 节律, µV\\n62 : T6-A2 β(LF)节律, µV\\n\"\n ]\n ],\n [\n [\n \"## 利用随机森林进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"randomForest = RandomForestRegressor().fit(X_withLabel, y_label)\\r\\nmodelrandomForest = SelectFromModel(randomForest, prefit=True)\\r\\nX_randomForest = modelrandomForest.transform(X_withLabel)\\r\\n\\r\\nrandomForestIndexMask = modelrandomForest.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,randomForestIndexMask].tolist() # 被筛选出来的列的值\\r\\nrandomForestIndexMask = randomForestIndexMask.tolist() \\r\\nrandomForestIndexTrue = []\\r\\nrandomForestIndexFalse = []\\r\\n\\r\\nfor i in range(len(randomForestIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (randomForestIndexMask[i]==True):\\r\\n randomForestIndexTrue.append(i)\\r\\n if (randomForestIndexMask[i]==False):\\r\\n randomForestIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(randomForestIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[randomForestIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(randomForestIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[randomForestIndexFalse[i]])\\r\\n\\r\\ndataFrameOfRandomForestClassificationFeature = dataFrame\\r\\nfor i in range(len(randomForestIndexFalse)):\\r\\n dataFrameOfRandomForestClassificationFeature = 
dataFrameOfRandomForestClassificationFeature.drop([name[randomForestIndexFalse[i]]],axis=1)\\r\\ndataFrameOfRandomForestClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/RandomForestFeatureSelectionOfLabel.xlsx')\\r\\ndataFrameOfRandomForestClassificationFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 θ 节律, µV\\n2 : FP2-A2 β(LF)节律, µV\\n3 : F4-A2 α 节律, µV\\n4 : F4-A2 β(LF)节律, µV\\n5 : FZ-A2 β(LF)节律, µV\\n6 : C3-A1 β(LF)节律, µV\\n7 : C4-A2 δ 节律,µV\\n8 : C4-A2 θ 节律, µV\\n9 : C4-A2 α 节律, µV\\n10 : CZ-A1 α 节律, µV\\n11 : P3-A1 δ 节律,µV\\n12 : P3-A1 α 节律, µV\\n13 : P3-A1 β(LF)节律, µV\\n14 : P4-A2 δ 节律,µV\\n15 : P4-A2 θ 节律, µV\\n16 : P4-A2 α 节律, µV\\n17 : PZ-A2 β(LF)节律, µV\\n18 : O2-A2 δ 节律,µV\\n19 : O2-A2 β(LF)节律, µV\\n20 : F7-A1 θ 节律, µV\\n21 : F8-A2 α 节律, µV\\n22 : F8-A2 β(LF)节律, µV\\n23 : T3-A1 θ 节律, µV\\n24 : T4-A2 δ 节律,µV\\n25 : T4-A2 θ 节律, µV\\n26 : T4-A2 α 节律, µV\\n27 : T4-A2 β(LF)节律, µV\\n28 : T5-A1 δ 节律,µV\\n29 : T5-A1 θ 节律, µV\\n30 : T5-A1 β(LF)节律, µV\\n31 : T6-A2 θ 节律, µV\\n32 : T6-A2 α 节律, µV\\n33 : T6-A2 β(LF)节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 α 节律, µV\\n3 : FP1-A1 β(LF)节律, µV\\n4 : FP2-A2 δ 节律,µV\\n5 : FP2-A2 θ 节律, µV\\n6 : FP2-A2 α 节律, µV\\n7 : F3-A1 δ 节律,µV\\n8 : F3-A1 θ 节律, µV\\n9 : F3-A1 α 节律, µV\\n10 : F3-A1 β(LF)节律, µV\\n11 : F4-A2 δ 节律,µV\\n12 : F4-A2 θ 节律, µV\\n13 : FZ-A2 δ 节律,µV\\n14 : FZ-A2 θ 节律, µV\\n15 : FZ-A2 α 节律, µV\\n16 : C3-A1 δ 节律,µV\\n17 : C3-A1 θ 节律, µV\\n18 : C3-A1 α 节律, µV\\n19 : C4-A2 β(LF)节律, µV\\n20 : CZ-A1 δ 节律,µV\\n21 : CZ-A1 θ 节律, µV\\n22 : CZ-A1 β(LF)节律, µV\\n23 : P3-A1 θ 节律, µV\\n24 : P4-A2 β(LF)节律, µV\\n25 : PZ-A2 δ 节律,µV\\n26 : PZ-A2 θ 节律, µV\\n27 : PZ-A2 α 节律, µV\\n28 : O1-A1 δ 节律,µV\\n29 : O1-A1 θ 节律, µV\\n30 : O1-A1 α 节律, µV\\n31 : O1-A1 β(LF)节律, µV\\n32 : O2-A2 θ 节律, µV\\n33 : O2-A2 α 节律, µV\\n34 : F7-A1 δ 节律,µV\\n35 : F7-A1 α 节律, µV\\n36 : F7-A1 β(LF)节律, µV\\n37 : F8-A2 δ 节律,µV\\n38 : F8-A2 θ 节律, µV\\n39 : T3-A1 δ 节律,µV\\n40 : T3-A1 α 节律, µV\\n41 : T3-A1 β(LF)节律, µV\\n42 : T5-A1 α 节律, µV\\n43 : T6-A2 δ 节律,µV\\n\"\n ]\n ],\n [\n [\n \"## 利用GBDT进行特征选择\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"GBDTClassifier = GradientBoostingClassifier().fit(X_withLabel, y_label)\\r\\nmodelGBDTClassifier = SelectFromModel(GBDTClassifier, prefit=True)\\r\\nX_GBDTClassifier = modelGBDTClassifier.transform(X_withLabel)\\r\\n\\r\\nGBDTClassifierIndexMask = modelGBDTClassifier.get_support() # 获取筛选的mask\\r\\nvalue = X_withLabel[:,GBDTClassifierIndexMask].tolist() # 被筛选出来的列的值\\r\\nGBDTClassifierIndexMask = GBDTClassifierIndexMask.tolist() \\r\\nGBDTClassifierIndexTrue = []\\r\\nGBDTClassifierIndexFalse = []\\r\\n\\r\\nfor i in range(len(GBDTClassifierIndexMask)): # 记录下被筛选的indicator的序号\\r\\n if (GBDTClassifierIndexMask[i]==True):\\r\\n GBDTClassifierIndexTrue.append(i)\\r\\n if (GBDTClassifierIndexMask[i]==False):\\r\\n GBDTClassifierIndexFalse.append(i)\\r\\nprint(\\\"被筛选后剩下的特征:\\\")\\r\\nfor i in range(len(GBDTClassifierIndexTrue)):\\r\\n print(i+1,\\\":\\\",name[GBDTClassifierIndexTrue[i]])\\r\\nprint(\\\"\\\\n被筛选后去掉的特征:\\\")\\r\\nfor i in range(len(GBDTClassifierIndexFalse)):\\r\\n print(i+1,\\\":\\\",name[GBDTClassifierIndexFalse[i]])\\r\\n\\r\\ndataFrameOfGBDTClassificationFeature = dataFrame\\r\\nfor i in range(len(GBDTClassifierIndexFalse)):\\r\\n dataFrameOfGBDTClassificationFeature = 
dataFrameOfGBDTClassificationFeature.drop([name[GBDTClassifierIndexFalse[i]]],axis=1)\\r\\ndataFrameOfGBDTClassificationFeature.to_excel('/content/drive/MyDrive/DataMining/final/GBDTClassifierFeatureSelectionOfLabel.xlsx')\\r\\ndataFrameOfGBDTClassificationFeature\",\n \"被筛选后剩下的特征:\\n1 : FP1-A1 α 节律, µV\\n2 : FP2-A2 θ 节律, µV\\n3 : FP2-A2 β(LF)节律, µV\\n4 : C4-A2 θ 节律, µV\\n5 : P3-A1 α 节律, µV\\n6 : P4-A2 α 节律, µV\\n7 : P4-A2 β(LF)节律, µV\\n8 : PZ-A2 β(LF)节律, µV\\n9 : O2-A2 δ 节律,µV\\n10 : F7-A1 δ 节律,µV\\n11 : F8-A2 δ 节律,µV\\n12 : F8-A2 β(LF)节律, µV\\n13 : T3-A1 θ 节律, µV\\n14 : T4-A2 δ 节律,µV\\n15 : T4-A2 θ 节律, µV\\n16 : T5-A1 α 节律, µV\\n\\n被筛选后去掉的特征:\\n1 : FP1-A1 δ 节律,µV\\n2 : FP1-A1 θ 节律, µV\\n3 : FP1-A1 β(LF)节律, µV\\n4 : FP2-A2 δ 节律,µV\\n5 : FP2-A2 α 节律, µV\\n6 : F3-A1 δ 节律,µV\\n7 : F3-A1 θ 节律, µV\\n8 : F3-A1 α 节律, µV\\n9 : F3-A1 β(LF)节律, µV\\n10 : F4-A2 δ 节律,µV\\n11 : F4-A2 θ 节律, µV\\n12 : F4-A2 α 节律, µV\\n13 : F4-A2 β(LF)节律, µV\\n14 : FZ-A2 δ 节律,µV\\n15 : FZ-A2 θ 节律, µV\\n16 : FZ-A2 α 节律, µV\\n17 : FZ-A2 β(LF)节律, µV\\n18 : C3-A1 δ 节律,µV\\n19 : C3-A1 θ 节律, µV\\n20 : C3-A1 α 节律, µV\\n21 : C3-A1 β(LF)节律, µV\\n22 : C4-A2 δ 节律,µV\\n23 : C4-A2 α 节律, µV\\n24 : C4-A2 β(LF)节律, µV\\n25 : CZ-A1 δ 节律,µV\\n26 : CZ-A1 θ 节律, µV\\n27 : CZ-A1 α 节律, µV\\n28 : CZ-A1 β(LF)节律, µV\\n29 : P3-A1 δ 节律,µV\\n30 : P3-A1 θ 节律, µV\\n31 : P3-A1 β(LF)节律, µV\\n32 : P4-A2 δ 节律,µV\\n33 : P4-A2 θ 节律, µV\\n34 : PZ-A2 δ 节律,µV\\n35 : PZ-A2 θ 节律, µV\\n36 : PZ-A2 α 节律, µV\\n37 : O1-A1 δ 节律,µV\\n38 : O1-A1 θ 节律, µV\\n39 : O1-A1 α 节律, µV\\n40 : O1-A1 β(LF)节律, µV\\n41 : O2-A2 θ 节律, µV\\n42 : O2-A2 α 节律, µV\\n43 : O2-A2 β(LF)节律, µV\\n44 : F7-A1 θ 节律, µV\\n45 : F7-A1 α 节律, µV\\n46 : F7-A1 β(LF)节律, µV\\n47 : F8-A2 θ 节律, µV\\n48 : F8-A2 α 节律, µV\\n49 : T3-A1 δ 节律,µV\\n50 : T3-A1 α 节律, µV\\n51 : T3-A1 β(LF)节律, µV\\n52 : T4-A2 α 节律, µV\\n53 : T4-A2 β(LF)节律, µV\\n54 : T5-A1 δ 节律,µV\\n55 : T5-A1 θ 节律, µV\\n56 : T5-A1 β(LF)节律, µV\\n57 : T6-A2 δ 节律,µV\\n58 : T6-A2 θ 节律, µV\\n59 : T6-A2 α 节律, µV\\n60 : T6-A2 β(LF)节律, µV\\n\"\n ]\n ],\n [\n [\n \"# 测试选取的特征\",\n \"_____no_output_____\"\n ],\n [\n \"## 读入PCA和LDA降维后的数据\",\n \"_____no_output_____\"\n ],\n [\n \"## 获取特征选取后的数据\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"RegressionFeatureSelection = [dataFrameOfLassoRegressionFeature,dataFrameOfLSVRegressionFeature,dataFrameOfDecisionTreeRegressionFeature,\\r\\n dataFrameOfRandomForestRegressionFeature,dataFrameOfGBDTRegressionFeature]\\r\\n\\r\\nClassificationFeatureSelection = [dataFrameOfLassoClassificationFeature,dataFrameOfLSVClassificationFeature,dataFrameOfDecisionTreeClassificationFeature,\\r\\n dataFrameOfRandomForestClassificationFeature,dataFrameOfGBDTClassificationFeature]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 筛选回归的特征\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"allMSEResult=[]\\r\\nallr2Result=[]\\r\\n\\r\\nprint(\\\"LR测试结果\\\")\\r\\nfor i in range(len(RegressionFeatureSelection)):\\r\\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,3]\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=LinearRegression()\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempMSE=[]\\r\\n tempr2=[]\\r\\n tempMSE.append(mean_squared_error(test_y,pred_y))\\r\\n tempr2.append(r2_score(test_y,pred_y))\\r\\n if(i==len(RegressionFeatureSelection)-1):\\r\\n allMSEResult.append(min(tempMSE))\\r\\n allr2Result.append(max(tempr2))\\r\\n print('Mean squared error: %.2f'\\r\\n % 
mean_squared_error(test_y, pred_y))\\r\\n print('Coefficient of determination: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nSVR测试结果\\\")\\r\\nfor i in range(len(RegressionFeatureSelection)):\\r\\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,3]\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=SVR()\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempMSE=[]\\r\\n tempr2=[]\\r\\n tempMSE.append(mean_squared_error(test_y,pred_y))\\r\\n tempr2.append(r2_score(test_y,pred_y))\\r\\n if(i==len(RegressionFeatureSelection)-1):\\r\\n allMSEResult.append(min(tempMSE))\\r\\n allr2Result.append(max(tempr2))\\r\\n print('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\n print('Coefficient of determination: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n \\r\\nprint(\\\"\\\\n决策树测试结果\\\")\\r\\nfor i in range(len(RegressionFeatureSelection)):\\r\\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,3]\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=DecisionTreeRegressor(random_state=4)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempMSE=[]\\r\\n tempr2=[]\\r\\n tempMSE.append(mean_squared_error(test_y,pred_y))\\r\\n tempr2.append(r2_score(test_y,pred_y))\\r\\n if(i==len(RegressionFeatureSelection)-1):\\r\\n allMSEResult.append(min(tempMSE))\\r\\n allr2Result.append(max(tempr2))\\r\\n print('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\n print('Coefficient of determination: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nGBDT测试结果\\\")\\r\\nfor i in range(len(RegressionFeatureSelection)):\\r\\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,3]\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=GradientBoostingRegressor(random_state=4)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempMSE=[]\\r\\n tempr2=[]\\r\\n tempMSE.append(mean_squared_error(test_y,pred_y))\\r\\n tempr2.append(r2_score(test_y,pred_y))\\r\\n if(i==len(RegressionFeatureSelection)-1):\\r\\n allMSEResult.append(min(tempMSE))\\r\\n allr2Result.append(max(tempr2))\\r\\n print('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\n print('Coefficient of determination: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n \\r\\nprint(\\\"\\\\n随机森林测试结果\\\")\\r\\nfor i in range(len(RegressionFeatureSelection)):\\r\\n tempArray = np.array(RegressionFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,3]\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=RandomForestRegressor(random_state=4)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempMSE=[]\\r\\n tempr2=[]\\r\\n tempMSE.append(mean_squared_error(test_y,pred_y))\\r\\n tempr2.append(r2_score(test_y,pred_y))\\r\\n if(i==len(RegressionFeatureSelection)-1):\\r\\n allMSEResult.append(min(tempMSE))\\r\\n allr2Result.append(max(tempr2))\\r\\n print('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\n print('Coefficient of determination: 
%.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n \\r\\nmodelNamelist = ['LR','SVR','决策树','GBDT','随机森林']\\r\\nfor i in range(5):\\r\\n if(i==0):\\r\\n print()\\r\\n print(modelNamelist[i]+\\\"测试结果\\\")\\r\\n print('Best MSE -',i+1,': %.2f'\\r\\n % (allMSEResult)[i])\\r\\n print('Best R2-Score -',i+1,': %.2f\\\\n'\\r\\n % (allr2Result)[i])\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 原始特征回归表现\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(\\\"LR测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,3].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=LinearRegression()\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\nprint('R2-Score: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nSVR测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,3].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=SVR()\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\nprint('R2-Score: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\n决策树测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,3].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=DecisionTreeRegressor(random_state=0)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\nprint('R2-Score: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nGBDT测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,3].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=GradientBoostingRegressor(random_state=0)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\nprint('R2-Score: %.2f'\\r\\n % r2_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\n随机森林测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,3].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=RandomForestRegressor(random_state=0)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Mean squared error: %.2f'\\r\\n % mean_squared_error(test_y, pred_y))\\r\\nprint('R2-Score: %.2f'\\r\\n % r2_score(test_y, pred_y))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 筛选分类的特征\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"allAccuracyResult=[]\\r\\nallF1Result=[]\\r\\nprint(\\\"LR测试结果\\\")\\r\\nfor i in range(len(ClassificationFeatureSelection)):\\r\\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,4].astype(int)\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=LogisticRegression(max_iter=10000)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempAccuracy=[]\\r\\n tempF1=[]\\r\\n 
tempAccuracy.append(accuracy_score(test_y,pred_y))\\r\\n tempF1.append(f1_score(test_y,pred_y))\\r\\n if(i==len(ClassificationFeatureSelection)-1):\\r\\n allAccuracyResult.append(max(tempAccuracy))\\r\\n allF1Result.append(max(tempF1))\\r\\n print('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\n print('F1-Score: %.2f\\\\n'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nSVC测试结果\\\")\\r\\nfor i in range(len(ClassificationFeatureSelection)):\\r\\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,4].astype(int)\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=SVC()\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempAccuracy=[]\\r\\n tempF1=[]\\r\\n tempAccuracy.append(accuracy_score(test_y,pred_y))\\r\\n tempF1.append(f1_score(test_y,pred_y))\\r\\n if(i==len(ClassificationFeatureSelection)-1):\\r\\n allAccuracyResult.append(max(tempAccuracy))\\r\\n allF1Result.append(max(tempF1))\\r\\n print('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\n print('F1-Score: %.2f\\\\n'\\r\\n % f1_score(test_y, pred_y))\\r\\n \\r\\nprint(\\\"\\\\n决策树测试结果\\\")\\r\\nfor i in range(len(ClassificationFeatureSelection)):\\r\\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,4].astype(int)\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=DecisionTreeClassifier(random_state=0)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempAccuracy=[]\\r\\n tempF1=[]\\r\\n tempAccuracy.append(accuracy_score(test_y,pred_y))\\r\\n tempF1.append(f1_score(test_y,pred_y))\\r\\n if(i==len(ClassificationFeatureSelection)-1):\\r\\n allAccuracyResult.append(max(tempAccuracy))\\r\\n allF1Result.append(max(tempF1))\\r\\n print('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\n print('F1-Score: %.2f\\\\n'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nGBDT测试结果\\\")\\r\\nfor i in range(len(ClassificationFeatureSelection)):\\r\\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,4].astype(int)\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=GradientBoostingClassifier(random_state=0)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempAccuracy=[]\\r\\n tempF1=[]\\r\\n tempAccuracy.append(accuracy_score(test_y,pred_y))\\r\\n tempF1.append(f1_score(test_y,pred_y))\\r\\n if(i==len(ClassificationFeatureSelection)-1):\\r\\n allAccuracyResult.append(max(tempAccuracy))\\r\\n allF1Result.append(max(tempF1))\\r\\n print('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\n print('F1-Score: %.2f\\\\n'\\r\\n % f1_score(test_y, pred_y))\\r\\n \\r\\nprint(\\\"\\\\n随机森林测试结果\\\")\\r\\nfor i in range(len(ClassificationFeatureSelection)):\\r\\n tempArray = np.array(ClassificationFeatureSelection[i])[:92,:]\\r\\n temp_X = tempArray[:,5:]\\r\\n temp_y = tempArray[:,4].astype(int)\\r\\n train_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\n clf=RandomForestClassifier(random_state=0)\\r\\n clf.fit(train_X,train_y)\\r\\n pred_y = clf.predict(test_X)\\r\\n if(i==0):\\r\\n tempAccuracy=[]\\r\\n tempF1=[]\\r\\n 
tempAccuracy.append(accuracy_score(test_y,pred_y))\\r\\n tempF1.append(f1_score(test_y,pred_y))\\r\\n if(i==len(ClassificationFeatureSelection)-1):\\r\\n allAccuracyResult.append(max(tempAccuracy))\\r\\n allF1Result.append(max(tempF1))\\r\\n print('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\n print('F1-Score: %.2f\\\\n'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nmodelNamelist = ['LR','SVR','决策树','GBDT','随机森林']\\r\\nfor i in range(5):\\r\\n if(i==0):\\r\\n print()\\r\\n print(modelNamelist[i]+\\\"测试结果\\\")\\r\\n print('Best Accuracy -',i+1,': %.2f'\\r\\n % (allAccuracyResult)[i])\\r\\n print('Best F1-Score -',i+1,': %.2f\\\\n'\\r\\n % (allF1Result)[i])\\r\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## 原始特征分类表现\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(\\\"LR测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,4].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=LogisticRegression(max_iter=10000)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\nprint('F1-Score: %.2f'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nSVR测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,4].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=SVC()\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\nprint('F1-Score: %.2f'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\n决策树测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,4].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=DecisionTreeClassifier(random_state=0)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\nprint('F1-Score: %.2f'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\nGBDT测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,4].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=GradientBoostingClassifier(random_state=0)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\nprint('F1-Score: %.2f'\\r\\n % f1_score(test_y, pred_y))\\r\\n\\r\\nprint(\\\"\\\\n随机森林测试结果\\\")\\r\\ntempArray = dataArray[:92,:]\\r\\ntemp_X = tempArray[:,5:]\\r\\ntemp_y = tempArray[:,4].astype(int)\\r\\ntrain_X,test_X,train_y,test_y = train_test_split(temp_X,temp_y,test_size=0.2,random_state=4)\\r\\nclf=RandomForestClassifier(random_state=0)\\r\\nclf.fit(train_X,train_y)\\r\\npred_y = clf.predict(test_X)\\r\\nprint('Accuracy: %.2f'\\r\\n % accuracy_score(test_y, pred_y))\\r\\nprint('F1-Score: %.2f'\\r\\n % f1_score(test_y, pred_y))\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list 
like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459070,"cells":{"hexsha":{"kind":"string","value":"e7f029cfe9e3cfe9913500999f0785c3a736c719"},"size":{"kind":"number","value":45112,"string":"45,112"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"class02a_igraph_R.ipynb"},"max_stars_repo_name":{"kind":"string","value":"curiositymap/Networks-in-Computational-Biology"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c7734cf2c03c7a794ab6990d433b1614c1837b58"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-09-17T14:59:30.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-29T16:35:39.000Z"},"max_issues_repo_path":{"kind":"string","value":"class02a_igraph_R.ipynb"},"max_issues_repo_name":{"kind":"string","value":"curiositymap/Networks-in-Computational-Biology"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c7734cf2c03c7a794ab6990d433b1614c1837b58"},"max_issues_repo_licenses":{"kind":"list 
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"class02a_igraph_R.ipynb"},"max_forks_repo_name":{"kind":"string","value":"curiositymap/Networks-in-Computational-Biology"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c7734cf2c03c7a794ab6990d433b1614c1837b58"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":5,"string":"5"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-03-12T19:21:56.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-28T08:23:58.000Z"},"avg_line_length":{"kind":"number","value":121.2688172043,"string":"121.268817"},"max_line_length":{"kind":"number","value":32680,"string":"32,680"},"alphanum_fraction":{"kind":"number","value":0.8287151977,"string":"0.828715"},"cells":{"kind":"list like","value":[[["# CSX46: Class session 2\n## *Introduction to the igraph package and the Pathway Commons network in SIF format*\n\n### Objective: load a network of human molecular interactions and create three igraph `Graph` objects from it (one for protein-protein interactions, one for metabolism interactions, and one for directed protein-protein interactions)","_____no_output_____"],["OK, we are going to read in the Pathway Commons data in SIF format. Recall that a SIF file is a tab-separated value file. You can find the file as `shared/pathway_commons.sif`. Load it into a data frame `pcdf` using the built-in function `read.table`. Don't forget to specify that the separator is the tab `\\t`, and that there is no quoting allowed (`quote=\"\"`). Use the `col.names` argument to name the three columns `species1`, `interaction_type`, and `species2`. Make sure to specify that there is no header and that `stringsAsFactors=FALSE`.\n\nFor help on using `read.table`, just type ?read.table\n\nNote: for each row, the `interaction_type` column contains one of 11 different interaction types (identified by a string, like `interacts-with` or `controls-production-of`). 
","_____no_output_____"]],[["pcdf <- read.table(\"shared/pathway_commons.sif\",\n sep=\"\\t\",\n quote=\"\",\n comment.char=\"\",\n stringsAsFactors=FALSE,\n header=FALSE,\n col.names=c(\"species1\",\"interaction_type\",\"species2\"))\n","_____no_output_____"]],[["Let's take a peek at `pcdf` using the `head` function:","_____no_output_____"]],[["head(pcdf)","_____no_output_____"],["library(igraph)\n\ninteraction_types_ppi <- c(\"interacts-with\",\n \"in-complex-with\",\n \"neighbor-of\")\n\ninteraction_types_metab <- c(\"controls-production-of\",\n \"consumption-controlled-by\",\n \"controls-production-of\",\n \"controls-transport-of-chemical\")\n\ninteraction_types_ppd <- c(\"catalysis-precedes\",\n \"controls-phosphorylation-of\",\n \"controls-state-change-of\",\n \"controls-transport-of\",\n \"controls-expression-of\")","\nAttaching package: ‘igraph’\n\n\nThe following objects are masked from ‘package:stats’:\n\n decompose, spectrum\n\n\nThe following object is masked from ‘package:base’:\n\n union\n\n\n"]],[["Subset data frame `pcdf` to obtain only the rows whose interactions are in `interaction_types_ppi`, and select only columns 1 and 3:","_____no_output_____"]],[["pcdf_ppi <- pcdf[pcdf$interaction_type %in% interaction_types_ppi,c(1,3)]","_____no_output_____"]],[["Use the `igraph` function `graph_from_data_farme` to build a network from the edge-list data in `pcdf_ppi`; use `print` to see a summary of the graph:","_____no_output_____"]],[["graph_ppi <- graph_from_data_frame(pcdf_ppi,\n directed=FALSE)\nprint(graph_ppi)","IGRAPH ba9e496 UN-- 17020 523498 -- \n+ attr: name (v/c)\n+ edges from ba9e496 (vertex names):\n [1] A1BG--ABCC6 A1BG--ANXA7 A1BG--CDKN1A A1BG--CRISP3 A1BG--GDPD1 \n [6] A1BG--GRB2 A1BG--GRB7 A1BG--HNF4A A1BG--ONECUT1 A1BG--PIK3CA \n[11] A1BG--PIK3R1 A1BG--PRDX4 A1BG--PTPN11 A1BG--SETD7 A1BG--SMN1 \n[16] A1BG--SMN2 A1BG--SNCA A1BG--SOS1 A1BG--TK1 A1CF--ACBD3 \n[21] A1CF--ACLY A1CF--APOBEC1 A1CF--APOBEC1 A1CF--ATF2 A1CF--CELF2 \n[26] A1CF--CTNNB1 A1CF--E2F1 A1CF--E2F3 A1CF--E2F4 A1CF--FHL3 \n[31] A1CF--HNF1A A1CF--HNF4A A1CF--JUN A1CF--KAT5 A1CF--KHSRP \n[36] A1CF--MBD2 A1CF--MBD3 A1CF--NRF1 A1CF--RBL2 A1CF--REL \n+ ... omitted several edges\n"]],[["Do the same for the metabolic network:","_____no_output_____"]],[["pcdf_metab <- pcdf[pcdf$interaction_type %in% interaction_types_metab, c(1,3)]\ngraph_metab <- graph_from_data_frame(pcdf_metab,\n directed=TRUE)\nprint(graph_metab)","IGRAPH 77472bf DN-- 7620 38145 -- \n+ attr: name (v/c)\n+ edges from 77472bf (vertex names):\n [1] A4GALT->CHEBI:17659 A4GALT->CHEBI:17950 A4GALT->CHEBI:18307\n [4] A4GALT->CHEBI:18313 A4GALT->CHEBI:58223 A4GALT->CHEBI:67119\n [7] A4GNT ->CHEBI:17659 A4GNT ->CHEBI:58223 AAAS ->CHEBI:1604 \n[10] AAAS ->CHEBI:2274 AACS ->CHEBI:13705 AACS ->CHEBI:15345\n[13] AACS ->CHEBI:17369 AACS ->CHEBI:18361 AACS ->CHEBI:29888\n[16] AACS ->CHEBI:57286 AACS ->CHEBI:57287 AACS ->CHEBI:57288\n[19] AACS ->CHEBI:57392 AACS ->CHEBI:58280 AADAC ->CHEBI:17790\n[22] AADAC ->CHEBI:40574 AADAC ->CHEBI:4743 AADAC ->CHEBI:85505\n+ ... 
omitted several edges\n"]],[["Do the same for the directed protein-protein interactions:","_____no_output_____"]],[["pcdf_ppd <- pcdf[pcdf$interaction_type %in% interaction_types_ppd, c(1,3)]\ngraph_ppd <- graph_from_data_frame(pcdf_ppd,\n directed=TRUE)\nprint(graph_ppd)","IGRAPH DN-- 16063 359713 -- \n+ attr: name (v/c), interaction_type (e/c)\nIGRAPH DN-- 16063 359713 -- \n+ attr: name (v/c), interaction_type (e/c)\n+ edges (vertex names):\n [1] A1BG ->A2M A1BG ->AKT1 A1BG ->AKT1 A2M ->APOA1 \n [5] A2M ->CDC42 A2M ->RAC1 A2M ->RAC2 A2M ->RAC3 \n [9] A2M ->RHOA A2M ->RHOBTB1 A2M ->RHOBTB2 A2M ->RHOB \n[13] A2M ->RHOC A2M ->RHOD A2M ->RHOF A2M ->RHOG \n[17] A2M ->RHOH A2M ->RHOJ A2M ->RHOQ A2M ->RHOT1 \n[21] A2M ->RHOT2 A2M ->RHOU A2M ->RHOV A4GALT->ABO \n[25] A4GALT->AK3 A4GALT->B3GALNT1 A4GALT->B3GALT1 A4GALT->B3GALT2 \n[29] A4GALT->B3GALT4 A4GALT->B3GALT5 A4GALT->B3GALT6 A4GALT->B3GAT2 \n+ ... omitted several edges\n"]],[["Question: of the three networks that you just created, which has the most edges?","_____no_output_____"],["Next, we need to create a small graph. Let's make a three-vertex undirected graph from an edge-list. Let's connect all vertices to all other vertices: 1<->2, 2<->3, 3<->1. We'll once again use graph_from_data_farme to do this:","_____no_output_____"]],[["testgraph <- graph_from_data_frame(data.frame(c(1,2,3), c(2,3,1)), directed=FALSE)","_____no_output_____"]],[["Now let's plot the small test graph:","_____no_output_____"]],[["plot(testgraph)","_____no_output_____"]]],"string":"[\n [\n [\n \"# CSX46: Class session 2\\n## *Introduction to the igraph package and the Pathway Commons network in SIF format*\\n\\n### Objective: load a network of human molecular interactions and create three igraph `Graph` objects from it (one for protein-protein interactions, one for metabolism interactions, and one for directed protein-protein interactions)\",\n \"_____no_output_____\"\n ],\n [\n \"OK, we are going to read in the Pathway Commons data in SIF format. Recall that a SIF file is a tab-separated value file. You can find the file as `shared/pathway_commons.sif`. Load it into a data frame `pcdf` using the built-in function `read.table`. Don't forget to specify that the separator is the tab `\\\\t`, and that there is no quoting allowed (`quote=\\\"\\\"`). Use the `col.names` argument to name the three columns `species1`, `interaction_type`, and `species2`. Make sure to specify that there is no header and that `stringsAsFactors=FALSE`.\\n\\nFor help on using `read.table`, just type ?read.table\\n\\nNote: for each row, the `interaction_type` column contains one of 11 different interaction types (identified by a string, like `interacts-with` or `controls-production-of`). 
\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"pcdf <- read.table(\\\"shared/pathway_commons.sif\\\",\\n sep=\\\"\\\\t\\\",\\n quote=\\\"\\\",\\n comment.char=\\\"\\\",\\n stringsAsFactors=FALSE,\\n header=FALSE,\\n col.names=c(\\\"species1\\\",\\\"interaction_type\\\",\\\"species2\\\"))\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let's take a peek at `pcdf` using the `head` function:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"head(pcdf)\",\n \"_____no_output_____\"\n ],\n [\n \"library(igraph)\\n\\ninteraction_types_ppi <- c(\\\"interacts-with\\\",\\n \\\"in-complex-with\\\",\\n \\\"neighbor-of\\\")\\n\\ninteraction_types_metab <- c(\\\"controls-production-of\\\",\\n \\\"consumption-controlled-by\\\",\\n \\\"controls-production-of\\\",\\n \\\"controls-transport-of-chemical\\\")\\n\\ninteraction_types_ppd <- c(\\\"catalysis-precedes\\\",\\n \\\"controls-phosphorylation-of\\\",\\n \\\"controls-state-change-of\\\",\\n \\\"controls-transport-of\\\",\\n \\\"controls-expression-of\\\")\",\n \"\\nAttaching package: ‘igraph’\\n\\n\\nThe following objects are masked from ‘package:stats’:\\n\\n decompose, spectrum\\n\\n\\nThe following object is masked from ‘package:base’:\\n\\n union\\n\\n\\n\"\n ]\n ],\n [\n [\n \"Subset data frame `pcdf` to obtain only the rows whose interactions are in `interaction_types_ppi`, and select only columns 1 and 3:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"pcdf_ppi <- pcdf[pcdf$interaction_type %in% interaction_types_ppi,c(1,3)]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Use the `igraph` function `graph_from_data_farme` to build a network from the edge-list data in `pcdf_ppi`; use `print` to see a summary of the graph:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"graph_ppi <- graph_from_data_frame(pcdf_ppi,\\n directed=FALSE)\\nprint(graph_ppi)\",\n \"IGRAPH ba9e496 UN-- 17020 523498 -- \\n+ attr: name (v/c)\\n+ edges from ba9e496 (vertex names):\\n [1] A1BG--ABCC6 A1BG--ANXA7 A1BG--CDKN1A A1BG--CRISP3 A1BG--GDPD1 \\n [6] A1BG--GRB2 A1BG--GRB7 A1BG--HNF4A A1BG--ONECUT1 A1BG--PIK3CA \\n[11] A1BG--PIK3R1 A1BG--PRDX4 A1BG--PTPN11 A1BG--SETD7 A1BG--SMN1 \\n[16] A1BG--SMN2 A1BG--SNCA A1BG--SOS1 A1BG--TK1 A1CF--ACBD3 \\n[21] A1CF--ACLY A1CF--APOBEC1 A1CF--APOBEC1 A1CF--ATF2 A1CF--CELF2 \\n[26] A1CF--CTNNB1 A1CF--E2F1 A1CF--E2F3 A1CF--E2F4 A1CF--FHL3 \\n[31] A1CF--HNF1A A1CF--HNF4A A1CF--JUN A1CF--KAT5 A1CF--KHSRP \\n[36] A1CF--MBD2 A1CF--MBD3 A1CF--NRF1 A1CF--RBL2 A1CF--REL \\n+ ... omitted several edges\\n\"\n ]\n ],\n [\n [\n \"Do the same for the metabolic network:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"pcdf_metab <- pcdf[pcdf$interaction_type %in% interaction_types_metab, c(1,3)]\\ngraph_metab <- graph_from_data_frame(pcdf_metab,\\n directed=TRUE)\\nprint(graph_metab)\",\n \"IGRAPH 77472bf DN-- 7620 38145 -- \\n+ attr: name (v/c)\\n+ edges from 77472bf (vertex names):\\n [1] A4GALT->CHEBI:17659 A4GALT->CHEBI:17950 A4GALT->CHEBI:18307\\n [4] A4GALT->CHEBI:18313 A4GALT->CHEBI:58223 A4GALT->CHEBI:67119\\n [7] A4GNT ->CHEBI:17659 A4GNT ->CHEBI:58223 AAAS ->CHEBI:1604 \\n[10] AAAS ->CHEBI:2274 AACS ->CHEBI:13705 AACS ->CHEBI:15345\\n[13] AACS ->CHEBI:17369 AACS ->CHEBI:18361 AACS ->CHEBI:29888\\n[16] AACS ->CHEBI:57286 AACS ->CHEBI:57287 AACS ->CHEBI:57288\\n[19] AACS ->CHEBI:57392 AACS ->CHEBI:58280 AADAC ->CHEBI:17790\\n[22] AADAC ->CHEBI:40574 AADAC ->CHEBI:4743 AADAC ->CHEBI:85505\\n+ ... 
omitted several edges\\n\"\n ]\n ],\n [\n [\n \"Do the same for the directed protein-protein interactions:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"pcdf_ppd <- pcdf[pcdf$interaction_type %in% interaction_types_ppd, c(1,3)]\\ngraph_ppd <- graph_from_data_frame(pcdf_ppd,\\n directed=TRUE)\\nprint(graph_ppd)\",\n \"IGRAPH DN-- 16063 359713 -- \\n+ attr: name (v/c), interaction_type (e/c)\\nIGRAPH DN-- 16063 359713 -- \\n+ attr: name (v/c), interaction_type (e/c)\\n+ edges (vertex names):\\n [1] A1BG ->A2M A1BG ->AKT1 A1BG ->AKT1 A2M ->APOA1 \\n [5] A2M ->CDC42 A2M ->RAC1 A2M ->RAC2 A2M ->RAC3 \\n [9] A2M ->RHOA A2M ->RHOBTB1 A2M ->RHOBTB2 A2M ->RHOB \\n[13] A2M ->RHOC A2M ->RHOD A2M ->RHOF A2M ->RHOG \\n[17] A2M ->RHOH A2M ->RHOJ A2M ->RHOQ A2M ->RHOT1 \\n[21] A2M ->RHOT2 A2M ->RHOU A2M ->RHOV A4GALT->ABO \\n[25] A4GALT->AK3 A4GALT->B3GALNT1 A4GALT->B3GALT1 A4GALT->B3GALT2 \\n[29] A4GALT->B3GALT4 A4GALT->B3GALT5 A4GALT->B3GALT6 A4GALT->B3GAT2 \\n+ ... omitted several edges\\n\"\n ]\n ],\n [\n [\n \"Question: of the three networks that you just created, which has the most edges?\",\n \"_____no_output_____\"\n ],\n [\n \"Next, we need to create a small graph. Let's make a three-vertex undirected graph from an edge-list. Let's connect all vertices to all other vertices: 1<->2, 2<->3, 3<->1. We'll once again use graph_from_data_farme to do this:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"testgraph <- graph_from_data_frame(data.frame(c(1,2,3), c(2,3,1)), directed=FALSE)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now let's plot the small test graph:\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"plot(testgraph)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1459071,"cells":{"hexsha":{"kind":"string","value":"e7f02ab92845ae16e119fa775c6f2c740354a85d"},"size":{"kind":"number","value":36185,"string":"36,185"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Trial_Run.ipynb"},"max_stars_repo_name":{"kind":"string","value":"RWJohns/ComputerVision_Steel"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0ed56f8d2169f31253286cd4c834aa27c37fd489"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Trial_Run.ipynb"},"max_issues_repo_name":{"kind":"string","value":"RWJohns/ComputerVision_Steel"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0ed56f8d2169f31253286cd4c834aa27c37fd489"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Trial_Run.ipynb"},"max_forks_repo_name":{"kind":"string","value":"RWJohns/ComputerVision_Steel"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0ed56f8d2169f31253286cd4c834aa27c37fd489"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":50.1177285319,"string":"50.117729"},"max_line_length":{"kind":"number","value":1675,"string":"1,675"},"alphanum_fraction":{"kind":"number","value":0.5784993782,"string":"0.578499"},"cells":{"kind":"list like","value":[[["import os\nimport numpy as np\nimport pandas as pd\n\nimport os\nimport cv2\nfrom pathlib import Path\n\nfrom skimage.io import imsave, imread\n\nimport tensorflow as tf\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import load_model\n\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n","_____no_output_____"],["#This csv loads the info that will become the masks which is in run-length encoding\ntrain_df = pd.read_csv('~/Data/Metis/Steel/train.csv')\ntrain_df.shape","_____no_output_____"],["data_path = \"/Users/robjohns/Data/Metis/Steel/train_images/\"\ntrain_data_path = os.path.join(data_path)\nimages = os.listdir(train_data_path)\nprint(len(images))","12568\n"],["def name_and_mask(start_idx):\n #in data set, each images has 4 rows, this grabs all 4 and makes sure image name matches\n \n col = start_idx\n img_names = [str(i).split(\"_\")[0] for i in train_df.iloc[col:col+4, 0].values]\n if not (img_names[0] == img_names[1] == img_names[2] == img_names[3]):\n raise ValueError\n \n # This takes the 4 values of tagged pixels for each of the 4 defect tags \n #for the current image\n labels = train_df.iloc[col:col+4, 1]\n \n #makes an empty mask that is 256x1600 pixels with 4 layers for each pixel\n mask = np.zeros((256, 1600, 4), dtype=np.uint8)\n\n \n #\n for idx, label in enumerate(labels.values):\n \n# 4 times, once for each layer, the mask label is processed\n# the output will leave all 0's for the mask we made above if there is no code\n# or it will be converted to changing the mask on that layer\n \n if label is not np.nan:\n mask_label = np.zeros(1600*256, dtype=np.uint8)\n label = label.split(\" \")\n\n#makes a list out of non-zero labels, alternating between positions and lengths\n \n positions = map(int, label[0::2])\n length = map(int, label[1::2])\n \n#makes lists of positions and lengths by iterating every other, \n#and 
forces them to become int \n \n for pos, le in zip(positions, length):\n mask_label[pos-1:pos+le-1] = 1\n mask[:, :, idx] = mask_label.reshape(256, 1600, order='F')\n# the positions called in label are turned to 1 in the mask for this layer\n \n return img_names[0], mask","_____no_output_____"],["#Make a full table of 4 masks per image with dims [numpix,vertpix,horpix,masks(4)]","_____no_output_____"],["#Build Masks\n\nyname=[]\ny=[]\nfor idx in range(0,128,4):\n pix,masks=name_and_mask(idx)\n indy=int(idx/4)\n y.append(masks.astype(np.float32))\n yname.append(pix)\ny=np.stack(y, axis=0) \ny_train=y\n\n\ny_test=[]\nfor idx in range(0,128,4):\n pix,masks=name_and_mask(idx)\n indy=int(idx/4)\n y_test.append(masks.astype(np.float32))\ny_test=np.stack(y_test, axis=0)","_____no_output_____"],["#process image files into numpy arrays \nx=[]\nfor idx in range(0,128,4): \n name, mask = name_and_mask(idx)\n abs_path = \"/Users/robjohns/Data/Metis/Steel/train_images/\"\n filename=abs_path+name\n impath = Path(filename)\n img = cv2.imread(filename)\n x.append(img.astype(np.float32))\nx=np.stack(x, axis=0) \nx_train=x\nx_train=x_train/255\n\nx_test=[]\nfor idx in range(0,128,4): \n name, mask = name_and_mask(idx)\n abs_path = \"/Users/robjohns/Data/Metis/Steel/train_images/\"\n filename=abs_path+name\n impath = Path(filename)\n img = cv2.imread(filename)\n x_test.append(img.astype(np.float32))\nx_test=np.stack(x_test, axis=0)\nx_test=x_test/255","_____no_output_____"],["x_test.shape","_____no_output_____"],["\nim_idx_list = []\nim_idx_list_onlydf = []\nim_idx_list_nodf = []\n\n\nfor col in range(0, len(train_df), 4):\n img_names = [str(i).split(\"_\")[0] for i in train_df.iloc[col:col+4, 0].values]\n if not (img_names[0] == img_names[1] == img_names[2] == img_names[3]):\n raise ValueError\n \n \n \n labels = train_df.iloc[col:col+4, 1]\n if labels.isna().all():\n im_idx_list_nodf.append(col)\n im_idx_list.append(col)\n \n \n elif (labels.isna() == [False, True, True, True]).all():\n im_idx_list.append(col)\n im_idx_list_onlydf.append(col)\n \n elif (labels.isna() == [True, False, True, True]).all():\n im_idx_list.append(col)\n im_idx_list_onlydf.append(col)\n \n elif (labels.isna() == [True, True, False, True]).all():\n im_idx_list.append(col)\n im_idx_list_onlydf.append(col)\n \n elif (labels.isna() == [True, True, True, False]).all():\n im_idx_list.append(col)\n im_idx_list_onlydf.append(col)\n \n else:\n im_idx_list.append(col)\n im_idx_list_onlydf.append(col)","_____no_output_____"],["class TFSCGen(keras.utils.Sequence):\n \"\"\"Generator class for Tensorflow Speech Competition data\n \n args:\n - gen_path (patlib.Path): a path pointing to a directory containing training examples\n examples should be stored in format `gen_path/[class_name]/[file_name].wav`\n - batch_size (int): size of batches to return\n - shuffle (bool): whether or not data is to be shuffled between batches\n \n \"\"\"\n \n \n def __init__(self, gen_path=im_idx_list, batch_size = 32, shuffle=True):\n \n \n self.gen_files = im_idx_list\n \n self.batch_size = batch_size\n self.shuffle = shuffle\n \n if self.shuffle:\n random.shuffle(self.gen_files)\n\n def __len__(self):\n \"\"\"returns the number of examples\"\"\"\n \n return int(np.ceil(len(self.gen_files) / float(self.batch_size)))\n \n def on_epoch_end(self):\n \"\"\"shuffles data after an epoch runs (but only if self.shuffle is set)\"\"\"\n \n if self.shuffle:\n random.shuffle(self.gen_files)\n \n\n def __getitem__(self, idx):\n \"\"\"function to return the batch given the 
batch index\n \n args:\n idx (int): this is the batch index generated by keras\n \n \"\"\"\n \n \n start_idx = idx*self.batch_size\n batch_rows = self.gen_files[start_idx:start_idx+self.batch_size]\n \n x=[]\n for idx in batch_rows: \n name, mask = name_and_mask(idx)\n abs_path = \"/Users/robjohns/Data/Metis/Steel/train_images/\"\n #need to generalize this for tests\n \n filename=abs_path+name\n impath = Path(filename)\n img = cv2.imread(filename)\n x.append(img.astype(np.float32))\n x=np.stack(x, axis=0) \n x=x/255\n \n \n y=[]\n for idx in batch_rows:\n pix,masks=name_and_mask(idx)\n indy=int(idx/4)\n y.append(masks.astype(np.float32))\n yname.append(pix)\n y=np.stack(y, axis=0) \n \n \n \n return x,y","_____no_output_____"],["#put TFSCGen() where x,y would be in model call\n\n# example: conv_model.fit(TFSCGen(), validation_data=TFSCGen(tfsc_val),\n epochs=100,\n callbacks=[\n keras.callbacks.ReduceLROnPlateau(patience=3, verbose=True), \n keras.callbacks.EarlyStopping(patience=8, restore_best_weights=True, verbose=True)\n ]) \n","_____no_output_____"],["#do test train split here, you need to send x_train, y_train, x_test, and y_test to model","_____no_output_____"],["len(x),len(y),len(yname)","_____no_output_____"],["np.save('imgs_masks.npy', y)\nnp.save('imgs.npy', x)\nnp.save('imgs_names.npy', yname)","_____no_output_____"],["train_df.shape","_____no_output_____"],["50272/4","_____no_output_____"],["def dice_coef(y_true, y_pred):\n smooth = 1.\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef dice_coef_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)","_____no_output_____"],["# unet from https://github.com/jocicmarko/ultrasound-nerve-segmentation","_____no_output_____"],["def get_unet():\n inputs = Input((256, 1600, 3))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(48, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(48, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(72, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(72, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(108, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(108, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(162, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(162, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(108, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(108, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(72, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(72, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(48, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(48, (3, 3), activation='relu', 
padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(4, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])\n\n return model\n","_____no_output_____"],["unet_model = get_unet()\n\nunet_model.summary()\n#check dimensions match expected output. ","Model: \"model_2\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_3 (InputLayer) [(None, 256, 1600, 3 0 \n__________________________________________________________________________________________________\nconv2d_38 (Conv2D) (None, 256, 1600, 32 896 input_3[0][0] \n__________________________________________________________________________________________________\nconv2d_39 (Conv2D) (None, 256, 1600, 32 9248 conv2d_38[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_8 (MaxPooling2D) (None, 128, 800, 32) 0 conv2d_39[0][0] \n__________________________________________________________________________________________________\nconv2d_40 (Conv2D) (None, 128, 800, 48) 13872 max_pooling2d_8[0][0] \n__________________________________________________________________________________________________\nconv2d_41 (Conv2D) (None, 128, 800, 48) 20784 conv2d_40[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_9 (MaxPooling2D) (None, 64, 400, 48) 0 conv2d_41[0][0] \n__________________________________________________________________________________________________\nconv2d_42 (Conv2D) (None, 64, 400, 72) 31176 max_pooling2d_9[0][0] \n__________________________________________________________________________________________________\nconv2d_43 (Conv2D) (None, 64, 400, 72) 46728 conv2d_42[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_10 (MaxPooling2D) (None, 32, 200, 72) 0 conv2d_43[0][0] \n__________________________________________________________________________________________________\nconv2d_44 (Conv2D) (None, 32, 200, 108) 70092 max_pooling2d_10[0][0] \n__________________________________________________________________________________________________\nconv2d_45 (Conv2D) (None, 32, 200, 108) 105084 conv2d_44[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_11 (MaxPooling2D) (None, 16, 100, 108) 0 conv2d_45[0][0] \n__________________________________________________________________________________________________\nconv2d_46 (Conv2D) (None, 16, 100, 162) 157626 max_pooling2d_11[0][0] \n__________________________________________________________________________________________________\nconv2d_47 (Conv2D) (None, 16, 100, 162) 236358 conv2d_46[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_8 (Conv2DTrans (None, 32, 200, 256) 166144 conv2d_47[0][0] 
\n__________________________________________________________________________________________________\nconcatenate_8 (Concatenate) (None, 32, 200, 364) 0 conv2d_transpose_8[0][0] \n conv2d_45[0][0] \n__________________________________________________________________________________________________\nconv2d_48 (Conv2D) (None, 32, 200, 108) 353916 concatenate_8[0][0] \n__________________________________________________________________________________________________\nconv2d_49 (Conv2D) (None, 32, 200, 108) 105084 conv2d_48[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_9 (Conv2DTrans (None, 64, 400, 128) 55424 conv2d_49[0][0] \n__________________________________________________________________________________________________\nconcatenate_9 (Concatenate) (None, 64, 400, 200) 0 conv2d_transpose_9[0][0] \n conv2d_43[0][0] \n__________________________________________________________________________________________________\nconv2d_50 (Conv2D) (None, 64, 400, 72) 129672 concatenate_9[0][0] \n__________________________________________________________________________________________________\nconv2d_51 (Conv2D) (None, 64, 400, 72) 46728 conv2d_50[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_10 (Conv2DTran (None, 128, 800, 64) 18496 conv2d_51[0][0] \n__________________________________________________________________________________________________\nconcatenate_10 (Concatenate) (None, 128, 800, 112 0 conv2d_transpose_10[0][0] \n conv2d_41[0][0] \n__________________________________________________________________________________________________\nconv2d_52 (Conv2D) (None, 128, 800, 48) 48432 concatenate_10[0][0] \n__________________________________________________________________________________________________\nconv2d_53 (Conv2D) (None, 128, 800, 48) 20784 conv2d_52[0][0] \n__________________________________________________________________________________________________\nconv2d_transpose_11 (Conv2DTran (None, 256, 1600, 32 6176 conv2d_53[0][0] \n__________________________________________________________________________________________________\nconcatenate_11 (Concatenate) (None, 256, 1600, 64 0 conv2d_transpose_11[0][0] \n conv2d_39[0][0] \n__________________________________________________________________________________________________\nconv2d_54 (Conv2D) (None, 256, 1600, 32 18464 concatenate_11[0][0] \n__________________________________________________________________________________________________\nconv2d_55 (Conv2D) (None, 256, 1600, 32 9248 conv2d_54[0][0] \n__________________________________________________________________________________________________\nconv2d_56 (Conv2D) (None, 256, 1600, 4) 132 conv2d_55[0][0] \n==================================================================================================\nTotal params: 1,670,564\nTrainable params: 1,670,564\nNon-trainable params: 0\n__________________________________________________________________________________________________\n"],["def train_and_predict():\n\n #'Loading and preprocessing train data\n \n \n imgs_train, imgs_mask_train = x_train, y_train\n\n \n \n \n #imgs_train = imgs_train/255\n \n mean = np.mean(imgs_train) # mean for data centering\n std = np.std(imgs_train) # std for data normalization\n\n #imgs_train -= mean\n #imgs_train /= std\n\n \n\n \n #Creating and compiling model\n \n \n model = get_unet()\n model_checkpoint = ModelCheckpoint('weights.h5', 
monitor='val_loss', save_best_only=True)\n\n \n # Fitting model\n \n model.fit(imgs_train, imgs_mask_train, batch_size=32, epochs=2, verbose=1, shuffle=True,\n validation_split=0,\n callbacks=[model_checkpoint])\n\n \n #Loading and preprocessing test data\n \n imgs_test, imgs_id_test = x_test, y_test\n \n\n imgs_test # /= 255.\n #imgs_test -= mean\n #imgs_test /= std\n\n \n # Loading saved weights.\n model.load_weights('weights.h5')\n\n \n #Predicting masks on test data\n \n imgs_mask_test = model.predict(imgs_test, verbose=1)\n \n #convert masks to a table of run length encoding\n \n ","_____no_output_____"],["train_and_predict()","Train on 32 samples\nEpoch 1/2\nWARNING:tensorflow:Can save best model only with val_loss available, skipping.\n32/32 [==============================] - 185s 6s/sample - loss: -0.0088 - dice_coef: 0.0088\nEpoch 2/2\nWARNING:tensorflow:Can save best model only with val_loss available, skipping.\n32/32 [==============================] - 157s 5s/sample - loss: -0.0098 - dice_coef: 0.0098\n"]]],"string":"[\n [\n [\n \"import os\\nimport numpy as np\\nimport pandas as pd\\n\\nimport os\\nimport cv2\\nfrom pathlib import Path\\n\\nfrom skimage.io import imsave, imread\\n\\nimport tensorflow as tf\\n\\nfrom tensorflow.keras.models import Model\\nfrom tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose\\nfrom tensorflow.keras.optimizers import Adam\\nfrom tensorflow.keras.callbacks import ModelCheckpoint\\nfrom tensorflow.keras import backend as K\\nfrom tensorflow.keras.models import load_model\\n\\nfrom tensorflow.python.framework import ops\\nops.reset_default_graph()\\n\",\n \"_____no_output_____\"\n ],\n [\n \"#This csv loads the info that will become the masks which is in run-length encoding\\ntrain_df = pd.read_csv('~/Data/Metis/Steel/train.csv')\\ntrain_df.shape\",\n \"_____no_output_____\"\n ],\n [\n \"data_path = \\\"/Users/robjohns/Data/Metis/Steel/train_images/\\\"\\ntrain_data_path = os.path.join(data_path)\\nimages = os.listdir(train_data_path)\\nprint(len(images))\",\n \"12568\\n\"\n ],\n [\n \"def name_and_mask(start_idx):\\n #in data set, each images has 4 rows, this grabs all 4 and makes sure image name matches\\n \\n col = start_idx\\n img_names = [str(i).split(\\\"_\\\")[0] for i in train_df.iloc[col:col+4, 0].values]\\n if not (img_names[0] == img_names[1] == img_names[2] == img_names[3]):\\n raise ValueError\\n \\n # This takes the 4 values of tagged pixels for each of the 4 defect tags \\n #for the current image\\n labels = train_df.iloc[col:col+4, 1]\\n \\n #makes an empty mask that is 256x1600 pixels with 4 layers for each pixel\\n mask = np.zeros((256, 1600, 4), dtype=np.uint8)\\n\\n \\n #\\n for idx, label in enumerate(labels.values):\\n \\n# 4 times, once for each layer, the mask label is processed\\n# the output will leave all 0's for the mask we made above if there is no code\\n# or it will be converted to changing the mask on that layer\\n \\n if label is not np.nan:\\n mask_label = np.zeros(1600*256, dtype=np.uint8)\\n label = label.split(\\\" \\\")\\n\\n#makes a list out of non-zero labels, alternating between positions and lengths\\n \\n positions = map(int, label[0::2])\\n length = map(int, label[1::2])\\n \\n#makes lists of positions and lengths by iterating every other, \\n#and forces them to become int \\n \\n for pos, le in zip(positions, length):\\n mask_label[pos-1:pos+le-1] = 1\\n mask[:, :, idx] = mask_label.reshape(256, 1600, order='F')\\n# the positions called in label are turned to 1 in 
of run length encoding\\n \\n \",\n \"_____no_output_____\"\n ],\n [\n \"train_and_predict()\",\n \"Train on 32 samples\\nEpoch 1/2\\nWARNING:tensorflow:Can save best model only with val_loss available, skipping.\\n32/32 [==============================] - 185s 6s/sample - loss: -0.0088 - dice_coef: 0.0088\\nEpoch 2/2\\nWARNING:tensorflow:Can save best model only with val_loss available, skipping.\\n32/32 [==============================] - 157s 5s/sample - loss: -0.0098 - dice_coef: 0.0098\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459072,"cells":{"hexsha":{"kind":"string","value":"e7f03dde214ec629e5a1c74a3a1de09db8ce0587"},"size":{"kind":"number","value":861832,"string":"861,832"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"_notebooks/2022-02-04-data-analysis-course-project.ipynb"},"max_stars_repo_name":{"kind":"string","value":"sandeshkatakam/My-Machine_learning-Blog"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2d71f3bcac3662617b54d6b90a46c85a6ebc6830"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-02-01T11:58:52.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-01T11:58:52.000Z"},"max_issues_repo_path":{"kind":"string","value":"_notebooks/2022-02-04-data-analysis-course-project.ipynb"},"max_issues_repo_name":{"kind":"string","value":"sandeshkatakam/My-Machine_learning-Blog"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2d71f3bcac3662617b54d6b90a46c85a6ebc6830"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":5,"string":"5"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-02-01T12:00:39.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-18T03:44:00.000Z"},"max_forks_repo_path":{"kind":"string","value":"_notebooks/2022-02-04-data-analysis-course-project.ipynb"},"max_forks_repo_name":{"kind":"string","value":"sandeshkatakam/My-Machine_learning-Blog"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2d71f3bcac3662617b54d6b90a46c85a6ebc6830"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":202.7362973418,"string":"202.736297"},"max_line_length":{"kind":"number","value":89526,"string":"89,526"},"alphanum_fraction":{"kind":"number","value":0.8754177148,"string":"0.875418"},"cells":{"kind":"list like","value":[[["# Axis Bank Stock Data Analysis Project Blog Post\n> Data Analysis of axis bank stock market 
time-series dataset.\n\n- toc: true \n- badges: true\n- comments: true\n- categories: [jupyter]\n- image: images/stockdataimg.jpg","_____no_output_____"],["## AxisBank Stock Data Analysis\n\nThe project is based on the dataset I obtained from kaggle. The Analysis I am performing is on the 'AXISBANK' stock market data from 2019-2021.AXISBANK is one of the stocks listed in NIFTY50 index. The NIFTY 50 is a benchmark Indian stock market index that represents the weighted average of 50 of the largest Indian companies listed on the National Stock Exchange. It is one of the two main stock indices used in India, the other being the BSE SENSEX. The Analysis is performed on the stock quote data of \"AXIS BANK\" from the dataset of NIFTY50 Stock Market data obtained from kaggle repo. \n\nAxis Bank Limited, formerly known as UTI Bank (1993–2007), is an Indian banking and financial services company headquartered in Mumbai, Maharashtra.It sells financial services to large and mid-size companies, SMEs and retail businesses.\n\nThe bank was founded on 3 December 1993 as UTI Bank, opening its registered office in Ahmedabad and a corporate office in Mumbai. The bank was promoted jointly by the Administrator of the Unit Trust of India (UTI), Life Insurance Corporation of India (LIC), General Insurance Corporation, National Insurance Company, The New India Assurance Company, The Oriental Insurance Corporation and United India Insurance Company. The first branch was inaugurated on 2 April 1994 in Ahmedabad by Manmohan Singh, then finance minister of India \\\nI chose this dataset because of the importance of NIFTY50 listed stocks on Indian economy. In most ways the NIFTY50 presents how well the Indian capital markets are doing.\n","_____no_output_____"],["## Downloading the Dataset\n\nIn this section of the Jupyter notebook we are going to download an interesting data set from kaggle dataset repositories. We are using python library called OpenDatasets for downloading from kaggle. While downloading we are asked for kaggle user id and API token key for accessing the dataset from kaggle. Kaggle is a platform used for obtaining datasets and various other datascience tasks. ","_____no_output_____"]],[["!pip install jovian opendatasets --upgrade --quiet","_____no_output_____"]],[["Let's begin by downloading the data, and listing the files within the dataset.","_____no_output_____"]],[["# Change this\ndataset_url = 'https://www.kaggle.com/rohanrao/nifty50-stock-market-data'","_____no_output_____"],["import opendatasets as od\nod.download(dataset_url)","Skipping, found downloaded files in \"./nifty50-stock-market-data\" (use force=True to force download)\n"]],[["The dataset has been downloaded and extracted.","_____no_output_____"]],[["# Change this\ndata_dir = './nifty50-stock-market-data'","_____no_output_____"],["import os\nos.listdir(data_dir)","_____no_output_____"]],[["Let us save and upload our work to Jovian before continuing.","_____no_output_____"]],[["project_name = \"nifty50-stockmarket-data\" # change this (use lowercase letters and hyphens only)","_____no_output_____"],["!pip install jovian --upgrade -q","_____no_output_____"],["import jovian","_____no_output_____"],["jovian.commit(project=project_name)","_____no_output_____"]],[["## Data Preparation and Cleaning\n\nData Preparation and Cleansing constitutes the first part of the Data Analysis project for any dataset. We do this process inorder to obtain retain valuable data from the data frame, one that is relevant for our analysis. 
The process is also used to remove erroneous values from the dataset (for example, replacing NaN with 0). Once the data has been prepared and cleansed, it can be used for analysis.
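To make these steps concrete, here is a minimal sketch of the same cleaning flow in pandas. It is only illustrative: the file path is a placeholder, the column names follow the AXISBANK CSV layout used later in this notebook, and the cells below perform the same steps one at a time.

```python
import pandas as pd

# Minimal sketch of the cleaning flow described above (illustrative path).
df = pd.read_csv('~/Data/nifty50/AXISBANK.csv')

# Drop quote columns we do not analyse.
df = df.drop(['Last', 'Series', 'VWAP', 'Trades',
              'Deliverable Volume', '%Deliverble'], axis=1)

# Convert Date to datetime so the data can be grouped by month/year later.
df['Date'] = pd.to_datetime(df['Date'])

# Keep only the 2019-2021 window and reset the index.
df = df[df['Date'] >= '2019-01-01'].reset_index(drop=True)

# Replace erroneous/missing values (e.g. NaN) with 0.
df = df.fillna(0)
```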
\nIn our dataframe we have a lot of non-releavant information, so we are going to drop few columns in the dataframe and fix some of the elements in data frame for better analysis. We are also going to change the Date column into DateTime format which can be further used to group the data by months/year.\n\n","_____no_output_____"]],[["import pandas as pd\nimport numpy as np\n","_____no_output_____"],["axis_df= pd.read_csv(data_dir + \"/AXISBANK.csv\")","_____no_output_____"],["axis_df.info()\n","\nRangeIndex: 5306 entries, 0 to 5305\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 5306 non-null object \n 1 Symbol 5306 non-null object \n 2 Series 5306 non-null object \n 3 Prev Close 5306 non-null float64\n 4 Open 5306 non-null float64\n 5 High 5306 non-null float64\n 6 Low 5306 non-null float64\n 7 Last 5306 non-null float64\n 8 Close 5306 non-null float64\n 9 VWAP 5306 non-null float64\n 10 Volume 5306 non-null int64 \n 11 Turnover 5306 non-null float64\n 12 Trades 2456 non-null float64\n 13 Deliverable Volume 4797 non-null float64\n 14 %Deliverble 4797 non-null float64\ndtypes: float64(11), int64(1), object(3)\nmemory usage: 621.9+ KB\n"],["axis_df.describe()\n","_____no_output_____"],["axis_df","_____no_output_____"],["axis_df['Symbol'] = np.where(axis_df['Symbol'] == 'UTIBANK', 'AXISBANK', axis_df['Symbol'])\naxis_df","_____no_output_____"],["axis_new_df = axis_df.drop(['Last','Series', 'VWAP', 'Trades','Deliverable Volume','%Deliverble'], axis=1)\n\naxis_new_df","_____no_output_____"],["def getIndexes(dfObj, value):\n ''' Get index positions of value in dataframe i.e. dfObj.'''\n listOfPos = list()\n # Get bool dataframe with True at positions where the given value exists\n result = dfObj.isin([value])\n # Get list of columns that contains the value\n seriesObj = result.any()\n columnNames = list(seriesObj[seriesObj == True].index)\n # Iterate over list of columns and fetch the rows indexes where value exists\n for col in columnNames:\n rows = list(result[col][result[col] == True].index)\n for row in rows:\n listOfPos.append((row, col))\n # Return a list of tuples indicating the positions of value in the dataframe\n return listOfPos\n","_____no_output_____"],["listOfPosition_axis = getIndexes(axis_df, '2019-01-01')\nlistOfPosition_axis","_____no_output_____"],["axis_new_df.drop(axis_new_df.loc[0:4728].index, inplace = True)","_____no_output_____"],["axis_new_df","_____no_output_____"]],[["## Summary of the operations done till now:\n1. we have taken a csv file containing stock data of AXIS BANK from the data set of nifty50 stocks and performed data cleansing operations on them.
\n2. Originally, the dataset contains stock price quotations going back to 2001, but for our analysis we have kept only the data for the years 2019-2021
\n3. Then we have dropped the columns that are not relevant for our analysis by using pandas dataframe operations.","_____no_output_____"]],[["axis_new_df.reset_index(drop=True, inplace=True)\naxis_new_df","_____no_output_____"],["axis_new_df['Date'] = pd.to_datetime(axis_new_df['Date']) # we changed the Dates into Datetime format from the object format\naxis_new_df.info() ","\nRangeIndex: 577 entries, 0 to 576\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 577 non-null datetime64[ns]\n 1 Symbol 577 non-null object \n 2 Prev Close 577 non-null float64 \n 3 Open 577 non-null float64 \n 4 High 577 non-null float64 \n 5 Low 577 non-null float64 \n 6 Close 577 non-null float64 \n 7 Volume 577 non-null int64 \n 8 Turnover 577 non-null float64 \ndtypes: datetime64[ns](1), float64(6), int64(1), object(1)\nmemory usage: 40.7+ KB\n"],["axis_new_df['Daily Lag'] = axis_new_df['Close'].shift(1) # Added a new column Daily Lag to calculate daily returns of the stock\naxis_new_df['Daily Returns'] = (axis_new_df['Daily Lag']/axis_new_df['Close']) -1\n","_____no_output_____"],["axis_dailyret_df = axis_new_df.drop(['Prev Close', 'Open','High', 'Low','Close','Daily Lag'], axis = 1)","_____no_output_____"],["axis_dailyret_df","_____no_output_____"],["import jovian","_____no_output_____"],["jovian.commit()","_____no_output_____"]],[["## Exploratory Analysis and Visualization\n\n\n#### Here we compute the mean, max/min stock quotes of the stock AXISBANK. We specifically compute the mean of the Daily returns column. we are going to do the analysis by first converting the index datewise to month wise to have a good consolidated dataframe to analyze in broad timeline. we are going to divide the data frame into three for the years 2019, 2020, 2021 respectively, in order to analyze the yearly performance of the stock.\n","_____no_output_____"],["Let's begin by importing`matplotlib.pyplot` and `seaborn`.","_____no_output_____"]],[["import seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nsns.set_style('darkgrid')\nmatplotlib.rcParams['font.size'] = 10\nmatplotlib.rcParams['figure.figsize'] = (15, 5)\nmatplotlib.rcParams['figure.facecolor'] = '#00000000'","_____no_output_____"]],[["Here we are going to explore the daily Returns column by plotting a line graph of daily returns v/s Months. Now we can see that daily returns are growing across months in the years 2019-2021.","_____no_output_____"]],[["\naxis_dailyret_plot=axis_dailyret_df.groupby(axis_dailyret_df['Date'].dt.strftime('%B'))['Daily Returns'].sum().sort_values()\nplt.plot(axis_dailyret_plot)","_____no_output_____"],["axis_new_df['Year'] = pd.DatetimeIndex(axis_new_df['Date']).year\naxis_new_df\n","_____no_output_____"],["axis2019_df = axis_new_df[axis_new_df.Year == 2019 ]\naxis2020_df = axis_new_df[axis_new_df.Year == 2020 ]\naxis2021_df = axis_new_df[axis_new_df.Year == 2021 ]","_____no_output_____"],["axis2019_df.reset_index(drop = True, inplace = True)\naxis2019_df","_____no_output_____"],["axis2020_df.reset_index(drop = True, inplace = True)\naxis2020_df","_____no_output_____"],["axis2021_df.reset_index(drop=True, inplace=True)\naxis2021_df","_____no_output_____"]],[["## Summary of above exploratory Analysis:\nIn the above code cells, we performed plotting of the data by exploring a column from the data. We have divided the DataFrame into three data frames containing the stock quote data from year-wise i.e., for the years 2019, 2020, 2021. 
For dividing the DataFrame year-wise we have added a new column called 'Year' which is generated from the DataTime values of the column \"Date\".\n\n\n","_____no_output_____"]],[["axis_range_df = axis_dailyret_df['Daily Returns'].max() - axis_dailyret_df['Daily Returns'].min()\naxis_range_df","_____no_output_____"],["axis_mean_df = axis_dailyret_df['Daily Returns'].mean()\naxis_mean_df","_____no_output_____"]],[["In the above two code cells, we have computed the range i.e. the difference between maximum and minimum value of the column. We have also calculated the mean of the daily returns of the Axis Bank stock.","_____no_output_____"],["## Exploratory Analysis of stock quotes year-wise for Axis Bank:\nIn this section we have plotted the Closing values of the stock throughout the year for the years 2019,2020,2021. We have only partial data for 2021(i.e. till Apr 2021). We have also done a plot to compare the performance throughout the year for the years 2019 and 2020(since we had full data for the respective years).\n","_____no_output_____"]],[["plt.plot(axis2019_df['Date'],axis2019_df['Close'] )\nplt.title('Closing Values of stock for the year 2019')\nplt.xlabel(None)\nplt.ylabel('Closing price of the stock')","_____no_output_____"],["plt.plot(axis2020_df['Date'],axis2020_df['Close'])\nplt.title('Closing Values of stock for the year 2020')\nplt.xlabel(None)\nplt.ylabel('Closing price of the stock')","_____no_output_____"],["plt.plot(axis2021_df['Date'],axis2021_df['Close'])\nplt.title('Closing Values of stock for the year 2021 Till April Month')\nplt.xlabel(None)\nplt.ylabel('Closing price of the stock')","_____no_output_____"]],[["**TODO** - Explore one or more columns by plotting a graph below, and add some explanation about it","_____no_output_____"]],[["plt.style.use('fivethirtyeight')\nplt.plot(axis2019_df['Date'], axis2019_df['Close'],linewidth=3, label = '2019')\nplt.plot(axis2020_df[\"Date\"],axis2020_df['Close'],linewidth=3, label = '2020')\nplt.legend(loc='best' )\nplt.title('Closing Values of stock for the years 2019 and 2020')\nplt.xlabel(None)\nplt.ylabel('Closing price of the stock')\n","_____no_output_____"],["print(plt.style.available)","['Solarize_Light2', '_classic_test_patch', 'bmh', 'classic', 'dark_background', 'fast', 'fivethirtyeight', 'ggplot', 'grayscale', 'seaborn', 'seaborn-bright', 'seaborn-colorblind', 'seaborn-dark', 'seaborn-dark-palette', 'seaborn-darkgrid', 'seaborn-deep', 'seaborn-muted', 'seaborn-notebook', 'seaborn-paper', 'seaborn-pastel', 'seaborn-poster', 'seaborn-talk', 'seaborn-ticks', 'seaborn-white', 'seaborn-whitegrid', 'tableau-colorblind10']\n"]],[["Let us save and upload our work to Jovian before continuing","_____no_output_____"]],[["import jovian","_____no_output_____"],["jovian.commit()","_____no_output_____"]],[["## Asking and Answering Questions\n\nIn this section, we are going to answer some of the questions regarding the dataset using various data analysis libraries like Numpy, Pandas, Matplotlib and seaborn. By using the tools we can see how useful the libraries come in handy while doing Inference on a dataset.\n\n","_____no_output_____"],["> Instructions (delete this cell)\n>\n> - Ask at least 5 interesting questions about your dataset\n> - Answer the questions either by computing the results using Numpy/Pandas or by plotting graphs using Matplotlib/Seaborn\n> - Create new columns, merge multiple dataset and perform grouping/aggregation wherever necessary\n> - Wherever you're using a library function from Pandas/Numpy/Matplotlib etc. 
explain briefly what it does\n\n","_____no_output_____"],["### Q1: What was the change in price and volume of the stock traded overtime?","_____no_output_____"]],[["plt.plot(axis2019_df['Date'], axis2019_df['Close'],linewidth=3, label = '2019')\nplt.plot(axis2020_df[\"Date\"],axis2020_df['Close'],linewidth=3, label = '2020')\nplt.plot(axis2021_df[\"Date\"], axis2021_df['Close'],linewidth = 3, label = '2021')\nplt.legend(loc='best' )\nplt.title('Closing Price of stock for the years 2019-2021(Till April)')\nplt.xlabel(None)\nplt.ylabel('Closing price of the stock')","_____no_output_____"],["print('The Maximum closing price of the stock during 2019-2021 is',axis_new_df['Close'].max())\nprint('The Minimum closing price of the stock during 2019-2021 is',axis_new_df['Close'].min())\nprint('The Index for the Maximum closing price in the dataframe is',getIndexes(axis_new_df, axis_new_df['Close'].max()))\nprint('The Index for the Minimum closing price in the dataframe is',getIndexes(axis_new_df, axis_new_df['Close'].min()))\nprint(axis_new_df.iloc[104])\nprint(axis_new_df.iloc[303])\n","The Maximum closing price of the stock during 2019-2021 is 822.8\nThe Minimum closing price of the stock during 2019-2021 is 303.15\nThe Index for the Maximum closing price in the dataframe is [(105, 'Prev Close'), (104, 'Close'), (105, 'Daily Lag')]\nThe Index for the Minimum closing price in the dataframe is [(304, 'Prev Close'), (303, 'Close'), (304, 'Daily Lag')]\nDate 2019-06-04 00:00:00\nSymbol AXISBANK\nPrev Close 812.65\nOpen 807.55\nHigh 827.75\nLow 805.5\nClose 822.8\nVolume 9515354\nTurnover 778700415970000.0\nDaily Lag 812.65\nDaily Returns -0.012336\nYear 2019\nName: 104, dtype: object\nDate 2020-03-24 00:00:00\nSymbol AXISBANK\nPrev Close 308.65\nOpen 331.95\nHigh 337.5\nLow 291.0\nClose 303.15\nVolume 50683611\nTurnover 1578313503950000.0\nDaily Lag 308.65\nDaily Returns 0.018143\nYear 2020\nName: 303, dtype: object\n"]],[["* As we can see from the above one of the two plots there was a dip in the closing price during the year 2020. The Maximum Closing price occurred on 2019-06-04(Close = 822.8). The lowest of closing price during the years occurred on 2020-03-24(Close = 303.15). 
This can say that the start of the pandemic has caused the steep down curve for the stock's closing price.","_____no_output_____"]],[["plt.plot(axis2019_df[\"Date\"],axis2019_df[\"Volume\"],linewidth=2, label = '2019')\nplt.plot(axis2020_df[\"Date\"],axis2020_df[\"Volume\"],linewidth=2, label = '2020')\nplt.plot(axis2021_df[\"Date\"],axis2021_df[\"Volume\"],linewidth=2, label = '2021')\nplt.legend(loc='best')\nplt.title('Volume of stock traded in the years 2019-2021(till April)')\nplt.ylabel('Volume')\nplt.xlabel(None)\n","_____no_output_____"],["print('The Maximum volume of the stock traded during 2019-2021 is',axis_new_df['Volume'].max())\nprint('The Minimum volume of the stock traded during 2019-2021 is',axis_new_df['Volume'].min())\nprint('The Index for the Maximum volume stock traded in the dataframe is',getIndexes(axis_new_df, axis_new_df['Volume'].max()))\nprint('The Index for the Minimum volume stock traded in the dataframe is',getIndexes(axis_new_df, axis_new_df['Volume'].min()))\nprint(axis_new_df.iloc[357])\nprint(axis_new_df.iloc[200])","The Maximum volume of the stock traded during 2019-2021 is 96190274\nThe Minimum volume of the stock traded during 2019-2021 is 965772\nThe Index for the Maximum volume stock traded in the dataframe is [(357, 'Volume')]\nThe Index for the Minimum volume stock traded in the dataframe is [(200, 'Volume')]\nDate 2020-06-16 00:00:00\nSymbol AXISBANK\nPrev Close 389.6\nOpen 404.9\nHigh 405.0\nLow 360.4\nClose 381.55\nVolume 96190274\nTurnover 3654065942305001.0\nDaily Lag 389.6\nDaily Returns 0.021098\nYear 2020\nName: 357, dtype: object\nDate 2019-10-27 00:00:00\nSymbol AXISBANK\nPrev Close 708.6\nOpen 711.0\nHigh 715.05\nLow 708.55\nClose 710.1\nVolume 965772\nTurnover 68696126654999.992188\nDaily Lag 708.6\nDaily Returns -0.002112\nYear 2019\nName: 200, dtype: object\n"]],[["As we can see from the above graph a lot of volume of trade happened during 2020. That means the stock was transacted a lot during the year 2020. The highest Volumed of stock is traded on 2020-06-16(Volume =96190274) and the Minimum volume of the stock traded during 2019-2021 is on 2019-10-27(Volume = 965772)","_____no_output_____"],["### Q2: What was the daily return of the stock on average?\n\nThe daily return measures the price change in a stock's price as a percentage of the previous day's closing price. A positive return means the stock has grown in value, while a negative return means it has lost value. we will also attempt to calculate the maximum daily return of the stock during 2019-2021.","_____no_output_____"]],[["#axis_new_df['Daily Returns'].plot(title='Axis Bank Daily Returns')\nplt.plot(axis_new_df['Date'],axis_new_df['Daily Returns'], linewidth=2 ,label = 'Daily Returns')\nplt.legend(loc='best' )\nplt.title('Daily Returns of stock for the years 2019-2021(Till April)')\nplt.xlabel(None)\nplt.ylabel('Daily Returns of the stock')","_____no_output_____"],["plt.plot(axis_new_df['Date'],axis_new_df['Daily Returns'], linestyle='--', marker='o')\nplt.title('Daily Returns of stock for the years 2019-2021(Till April)')\nplt.xlabel(None)\nplt.ylabel('Daily Returns of the stock')","_____no_output_____"],["print('The Maximum daily return during the years 2020 is',axis_new_df['Daily Returns'].max())\nindex = getIndexes(axis_new_df, axis_new_df['Daily Returns'].max())\naxis_new_df.iloc[302]","The Maximum daily return during the years 2020 is 0.3871699335817269\n"],["def getIndexes(dfObj, value):\n ''' Get index positions of value in dataframe i.e. 
dfObj.'''\n listOfPos = list()\n # Get bool dataframe with True at positions where the given value exists\n result = dfObj.isin([value])\n # Get list of columns that contains the value\n seriesObj = result.any()\n columnNames = list(seriesObj[seriesObj == True].index)\n # Iterate over list of columns and fetch the rows indexes where value exists\n for col in columnNames:\n rows = list(result[col][result[col] == True].index)\n for row in rows:\n listOfPos.append((row, col))\n # Return a list of tuples indicating the positions of value in the dataframe\n return listOfPos","_____no_output_____"]],[["As we can see from the plot there were high daily returns for the stock around late March 2020 and then there was ups and downs from April- July 2020 . we can see that the most changes in daily returns occurred during April 2020 - July 2020 and at other times the daily returns were almost flat. The maximum daily returns for the stock during 2019-2021 occurred on 2020-03-23(observed from the pandas table above).","_____no_output_____"]],[["Avgdailyret_2019 =axis2019_df['Daily Returns'].sum()/len(axis2019_df['Daily Returns'])\nAvgdailyret_2020 =axis2020_df['Daily Returns'].sum()/len(axis2020_df['Daily Returns'])\nAvgdailyret_2021 =axis2021_df['Daily Returns'].sum()/len(axis2021_df['Daily Returns'])\n\n# create a dataset\ndata_dailyret = {'2019': Avgdailyret_2019, '2020':Avgdailyret_2020, '2021':Avgdailyret_2021}\nYears = list(data_dailyret.keys())\nAvgdailyret = list(data_dailyret.values())\n\n# plotting a bar chart\nplt.figure(figsize=(10, 7))\nplt.bar(Years, Avgdailyret, color ='maroon',width = 0.3)\nplt.xlabel(\"Years\")\nplt.ylabel(\"Average Daily Returns of the Stock Traded\")\nplt.title(\"Average Daily Returns of the Stock over the years 2019-2021(Till April) (in 10^7)\")\nplt.show()\n ","_____no_output_____"],["plt.figure(figsize=(12, 7))\nsns.distplot(axis_new_df['Daily Returns'].dropna(), bins=100, color='purple')\nplt.title(' Histogram of Daily Returns')\nplt.tight_layout()","/opt/conda/lib/python3.9/site-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n"]],[["### Q3: What is the Average Trading volume of the stock for past three years?","_____no_output_____"]],[["Avgvol_2019 =axis2019_df['Volume'].sum()/len(axis2019_df['Volume'])\nAvgvol_2020 =axis2020_df['Volume'].sum()/len(axis2020_df['Volume'])\nAvgvol_2021 =axis2021_df['Volume'].sum()/len(axis2021_df['Volume'])\n# create a dataset\ndata_volume = {'2019': Avgvol_2019, '2020':Avgvol_2020, '2021':Avgvol_2021}\nYears = list(data_volume.keys())\nAvgVol = list(data_volume.values())\n# plotting a bar chart\nplt.figure(figsize=(13, 7))\nplt.bar(Years, AvgVol, color ='maroon',width = 0.3)\nplt.xlabel(\"Years\")\nplt.ylabel(\"Average Volume of the Stock Traded\")\nplt.title(\"Average Trading volume of the Stock over the years 2019-2021(Till April) (in 10^7)\")\nplt.show()\n ","_____no_output_____"]],[["From the above plot we can say that more volume of the Axis Bank stock is traded during the year 2020. We can see a significant rise in the trading volume of the stock from 2019 to 2020. 
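As a side note, the per-year averages plotted above can also be computed in a single step with `groupby`. The snippet below is only a sketch and assumes `axis_new_df` (with the 'Year' column added earlier) and the matplotlib import are in scope.

```python
# Same per-year averages as the bar charts above, in one groupby call.
yearly_avg = axis_new_df.groupby('Year')[['Close', 'Volume', 'Daily Returns']].mean()
print(yearly_avg)

# Quick bar chart of average traded volume per year.
yearly_avg['Volume'].plot(kind='bar', figsize=(10, 6), color='maroon',
                          title='Average Trading Volume of the Stock per Year')
plt.ylabel('Average Volume')
plt.show()
```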
","_____no_output_____"],["### Q4: What is the Average Closing price of the stock for past three years?","_____no_output_____"]],[["Avgclose_2019 =axis2019_df['Close'].sum()/len(axis2019_df['Close'])\nAvgclose_2020 =axis2020_df['Close'].sum()/len(axis2020_df['Close'])\nAvgclose_2021 =axis2021_df['Close'].sum()/len(axis2021_df['Close'])\n# create a dataset\ndata_volume = {'2019': Avgclose_2019, '2020':Avgclose_2020, '2021':Avgclose_2021}\nYears = list(data_volume.keys())\nAvgClose = list(data_volume.values())\n# plotting a bar chart\nplt.figure(figsize=(13, 7))\nplt.bar(Years, AvgClose, color ='maroon',width = 0.3)\nplt.xlabel(\"Years\")\nplt.ylabel(\"Average Closding Price of the Stock Traded\")\nplt.title(\"Average Closing price of the Stock over the years 2019-2021(Till April) (in 10^7)\")\nplt.show()\n ","_____no_output_____"]],[["We have seen the Trading Volume of the stock is more during the year 2020. In contrast, the Year 2020 has the lowest average closing price among the other two. But for the years 2019 and 2021 the Average closing price is almost same, there is not much change in the value.","_____no_output_____"],[" ","_____no_output_____"],["Let us save and upload our work to Jovian before continuing.","_____no_output_____"]],[["import jovian","_____no_output_____"],["jovian.commit()","_____no_output_____"]],[["## Inferences and Conclusion\n\nInferences : The above data analysis is done on the data set of stock quotes for AXIS BANK during the years 2019-2021. From the Analysis we can say that during the year 2020 there has been a lot of unsteady growth, there has been rise in the volume of stock traded on the exchange, that means there has been a lot of transactions of the stock. The stock has seen a swift traffic in buy/sell during the year 2020 and has fallen back to normal in the year 2021. In contrast to the volume of the stock the closing price of the stock has decreased during the year 2020, which can be concluded as the volume of the stock traded has no relation to the price change of the stock(while most people think there can be a correlation among the two values). The price decrease for the stock may have been due to the pandemic rise in India during the year 2020. ","_____no_output_____"]],[["import jovian","_____no_output_____"],["jovian.commit()","_____no_output_____"]],[["## References and Future Work\n\nFuture Ideas for the Analyis:\n* I am planning to go forward with this basic Analysis of the AXISBANK stock quotes and build a Machine Learning model predicting the future stock prices.\n* I plan to automate the Data Analysis process for every stock in the NIFTY50 Index by defining reusable functions and automating the Analysis procedures.\n* Study more strong correlations between the different quotes of the stock and analyze how and why they are related in that fashion. 
\n\nREFRENCES/LINKS USED FOR THIS PROJECT :\n* https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop.html\n* https://stackoverflow.com/questions/16683701/in-pandas-how-to-get-the-index-of-a-known-value\n* https://towardsdatascience.com/working-with-datetime-in-pandas-dataframe-663f7af6c587\n* https://thispointer.com/python-find-indexes-of-an-element-in-pandas-dataframe/\n* https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#timeseries-friendly-merging\n* https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html\n* https://towardsdatascience.com/financial-analytics-exploratory-data-analysis-of-stock-data-d98cbadf98b9\n* https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.transpose.html\n* https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.set_index.html\n* https://pandas.pydata.org/docs/reference/api/pandas.merge.html\n* https://stackoverflow.com/questions/14661701/how-to-drop-a-list-of-rows-from-pandas-dataframe\n* https://www.interviewqs.com/ddi-code-snippets/extract-month-year-pandas\n* https://stackoverflow.com/questions/18172851/deleting-dataframe-row-in-pandas-based-on-column-value\n* https://queirozf.com/entries/matplotlib-examples-displaying-and-configuring-legends\n* https://jakevdp.github.io/PythonDataScienceHandbook/04.06-customizing-legends.html\n* https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html\n* https://matplotlib.org/devdocs/gallery/subplots_axes_and_figures/subplots_demo.html\n* https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html\n* https://stackoverflow.com/questions/332289/how-do-you-change-the-size-of-figures-drawn-with-matplotlib\n* https://www.investopedia.com/articles/investing/093014/stock-quotes-explained.asp\n* https://stackoverflow.com/questions/44908383/how-can-i-group-by-month-from-a-datefield-using-python-pandas\n* https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.hist.html\n* https://note.nkmk.me/en/python-pandas-dataframe-rename/\n* https://stackoverflow.com/questions/24748848/pandas-find-the-maximum-range-in-all-the-columns-of-dataframe\n* https://stackoverflow.com/questions/29233283/plotting-multiple-lines-in-different-colors-with-pandas-dataframe\n* https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html\n* https://www.geeksforgeeks.org/python-pandas-extracting-rows-using-loc/","_____no_output_____"]],[["import jovian","_____no_output_____"],["jovian.commit()","_____no_output_____"]]],"string":"[\n [\n [\n \"# Axis Bank Stock Data Analysis Project Blog Post\\n> Data Analysis of axis bank stock market time-series dataset.\\n\\n- toc: true \\n- badges: true\\n- comments: true\\n- categories: [jupyter]\\n- image: images/stockdataimg.jpg\",\n \"_____no_output_____\"\n ],\n [\n \"## AxisBank Stock Data Analysis\\n\\nThe project is based on the dataset I obtained from kaggle. The Analysis I am performing is on the 'AXISBANK' stock market data from 2019-2021.AXISBANK is one of the stocks listed in NIFTY50 index. The NIFTY 50 is a benchmark Indian stock market index that represents the weighted average of 50 of the largest Indian companies listed on the National Stock Exchange. It is one of the two main stock indices used in India, the other being the BSE SENSEX. The Analysis is performed on the stock quote data of \\\"AXIS BANK\\\" from the dataset of NIFTY50 Stock Market data obtained from kaggle repo. 
\\n3. Then we have dropped the columns that are not relevant for our analysis by using pandas dataframe operations.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"axis_new_df.reset_index(drop=True, inplace=True)\\naxis_new_df\",\n \"_____no_output_____\"\n ],\n [\n \"axis_new_df['Date'] = pd.to_datetime(axis_new_df['Date']) # we changed the Dates into Datetime format from the object format\\naxis_new_df.info() \",\n \"\\nRangeIndex: 577 entries, 0 to 576\\nData columns (total 9 columns):\\n # Column Non-Null Count Dtype \\n--- ------ -------------- ----- \\n 0 Date 577 non-null datetime64[ns]\\n 1 Symbol 577 non-null object \\n 2 Prev Close 577 non-null float64 \\n 3 Open 577 non-null float64 \\n 4 High 577 non-null float64 \\n 5 Low 577 non-null float64 \\n 6 Close 577 non-null float64 \\n 7 Volume 577 non-null int64 \\n 8 Turnover 577 non-null float64 \\ndtypes: datetime64[ns](1), float64(6), int64(1), object(1)\\nmemory usage: 40.7+ KB\\n\"\n ],\n [\n \"axis_new_df['Daily Lag'] = axis_new_df['Close'].shift(1) # Added a new column Daily Lag to calculate daily returns of the stock\\naxis_new_df['Daily Returns'] = (axis_new_df['Daily Lag']/axis_new_df['Close']) -1\\n\",\n \"_____no_output_____\"\n ],\n [\n \"axis_dailyret_df = axis_new_df.drop(['Prev Close', 'Open','High', 'Low','Close','Daily Lag'], axis = 1)\",\n \"_____no_output_____\"\n ],\n [\n \"axis_dailyret_df\",\n \"_____no_output_____\"\n ],\n [\n \"import jovian\",\n \"_____no_output_____\"\n ],\n [\n \"jovian.commit()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Exploratory Analysis and Visualization\\n\\n\\n#### Here we compute the mean, max/min stock quotes of the stock AXISBANK. We specifically compute the mean of the Daily returns column. we are going to do the analysis by first converting the index datewise to month wise to have a good consolidated dataframe to analyze in broad timeline. we are going to divide the data frame into three for the years 2019, 2020, 2021 respectively, in order to analyze the yearly performance of the stock.\\n\",\n \"_____no_output_____\"\n ],\n [\n \"Let's begin by importing`matplotlib.pyplot` and `seaborn`.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import seaborn as sns\\nimport matplotlib\\nimport matplotlib.pyplot as plt\\n%matplotlib inline\\n\\nsns.set_style('darkgrid')\\nmatplotlib.rcParams['font.size'] = 10\\nmatplotlib.rcParams['figure.figsize'] = (15, 5)\\nmatplotlib.rcParams['figure.facecolor'] = '#00000000'\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Here we are going to explore the daily Returns column by plotting a line graph of daily returns v/s Months. 
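As an aside before the plot below: grouping by `dt.strftime('%B')` keys the data on month names, and `sort_values()` then orders the months by the size of the summed return rather than by calendar order, so the upward shape of the line partly reflects that sorting. A minimal sketch of a chronological alternative, assuming the `axis_dailyret_df` frame built above (the `monthly_returns` name is only for illustration):

```python
import matplotlib.pyplot as plt

# Group by calendar month (year + month) so the x-axis stays in time order.
# dt.to_period('M') maps each timestamp to a monthly period such as 2019-01.
monthly_returns = (
    axis_dailyret_df
    .groupby(axis_dailyret_df['Date'].dt.to_period('M'))['Daily Returns']
    .sum()
)

# Convert the PeriodIndex back to timestamps for plotting.
monthly_returns.index = monthly_returns.index.to_timestamp()

plt.plot(monthly_returns.index, monthly_returns.values)
plt.title('Monthly sum of daily returns (chronological order)')
plt.ylabel('Sum of daily returns')
plt.show()
```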
Now we can see that daily returns are growing across months in the years 2019-2021.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\naxis_dailyret_plot=axis_dailyret_df.groupby(axis_dailyret_df['Date'].dt.strftime('%B'))['Daily Returns'].sum().sort_values()\\nplt.plot(axis_dailyret_plot)\",\n \"_____no_output_____\"\n ],\n [\n \"axis_new_df['Year'] = pd.DatetimeIndex(axis_new_df['Date']).year\\naxis_new_df\\n\",\n \"_____no_output_____\"\n ],\n [\n \"axis2019_df = axis_new_df[axis_new_df.Year == 2019 ]\\naxis2020_df = axis_new_df[axis_new_df.Year == 2020 ]\\naxis2021_df = axis_new_df[axis_new_df.Year == 2021 ]\",\n \"_____no_output_____\"\n ],\n [\n \"axis2019_df.reset_index(drop = True, inplace = True)\\naxis2019_df\",\n \"_____no_output_____\"\n ],\n [\n \"axis2020_df.reset_index(drop = True, inplace = True)\\naxis2020_df\",\n \"_____no_output_____\"\n ],\n [\n \"axis2021_df.reset_index(drop=True, inplace=True)\\naxis2021_df\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Summary of above exploratory Analysis:\\nIn the above code cells, we performed plotting of the data by exploring a column from the data. We have divided the DataFrame into three data frames containing the stock quote data from year-wise i.e., for the years 2019, 2020, 2021. For dividing the DataFrame year-wise we have added a new column called 'Year' which is generated from the DataTime values of the column \\\"Date\\\".\\n\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"axis_range_df = axis_dailyret_df['Daily Returns'].max() - axis_dailyret_df['Daily Returns'].min()\\naxis_range_df\",\n \"_____no_output_____\"\n ],\n [\n \"axis_mean_df = axis_dailyret_df['Daily Returns'].mean()\\naxis_mean_df\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In the above two code cells, we have computed the range i.e. the difference between maximum and minimum value of the column. We have also calculated the mean of the daily returns of the Axis Bank stock.\",\n \"_____no_output_____\"\n ],\n [\n \"## Exploratory Analysis of stock quotes year-wise for Axis Bank:\\nIn this section we have plotted the Closing values of the stock throughout the year for the years 2019,2020,2021. We have only partial data for 2021(i.e. till Apr 2021). 
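A compact way to get the same year-wise picture without slicing the frame into three copies is `groupby` with `agg`. A small sketch, assuming the `Year` column added above; the result is a summary table, not a replacement for the per-year DataFrames used later:

```python
# Per-year summary of the closing price and of daily returns in one shot.
yearly_close = axis_new_df.groupby('Year')['Close'].agg(['min', 'max', 'mean'])
yearly_returns = axis_new_df.groupby('Year')['Daily Returns'].agg(['min', 'max', 'mean'])

print(yearly_close)
print(yearly_returns)
```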
We have also done a plot to compare the performance throughout the year for the years 2019 and 2020(since we had full data for the respective years).\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"plt.plot(axis2019_df['Date'],axis2019_df['Close'] )\\nplt.title('Closing Values of stock for the year 2019')\\nplt.xlabel(None)\\nplt.ylabel('Closing price of the stock')\",\n \"_____no_output_____\"\n ],\n [\n \"plt.plot(axis2020_df['Date'],axis2020_df['Close'])\\nplt.title('Closing Values of stock for the year 2020')\\nplt.xlabel(None)\\nplt.ylabel('Closing price of the stock')\",\n \"_____no_output_____\"\n ],\n [\n \"plt.plot(axis2021_df['Date'],axis2021_df['Close'])\\nplt.title('Closing Values of stock for the year 2021 Till April Month')\\nplt.xlabel(None)\\nplt.ylabel('Closing price of the stock')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**TODO** - Explore one or more columns by plotting a graph below, and add some explanation about it\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"plt.style.use('fivethirtyeight')\\nplt.plot(axis2019_df['Date'], axis2019_df['Close'],linewidth=3, label = '2019')\\nplt.plot(axis2020_df[\\\"Date\\\"],axis2020_df['Close'],linewidth=3, label = '2020')\\nplt.legend(loc='best' )\\nplt.title('Closing Values of stock for the years 2019 and 2020')\\nplt.xlabel(None)\\nplt.ylabel('Closing price of the stock')\\n\",\n \"_____no_output_____\"\n ],\n [\n \"print(plt.style.available)\",\n \"['Solarize_Light2', '_classic_test_patch', 'bmh', 'classic', 'dark_background', 'fast', 'fivethirtyeight', 'ggplot', 'grayscale', 'seaborn', 'seaborn-bright', 'seaborn-colorblind', 'seaborn-dark', 'seaborn-dark-palette', 'seaborn-darkgrid', 'seaborn-deep', 'seaborn-muted', 'seaborn-notebook', 'seaborn-paper', 'seaborn-pastel', 'seaborn-poster', 'seaborn-talk', 'seaborn-ticks', 'seaborn-white', 'seaborn-whitegrid', 'tableau-colorblind10']\\n\"\n ]\n ],\n [\n [\n \"Let us save and upload our work to Jovian before continuing\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import jovian\",\n \"_____no_output_____\"\n ],\n [\n \"jovian.commit()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Asking and Answering Questions\\n\\nIn this section, we are going to answer some of the questions regarding the dataset using various data analysis libraries like Numpy, Pandas, Matplotlib and seaborn. By using the tools we can see how useful the libraries come in handy while doing Inference on a dataset.\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"> Instructions (delete this cell)\\n>\\n> - Ask at least 5 interesting questions about your dataset\\n> - Answer the questions either by computing the results using Numpy/Pandas or by plotting graphs using Matplotlib/Seaborn\\n> - Create new columns, merge multiple dataset and perform grouping/aggregation wherever necessary\\n> - Wherever you're using a library function from Pandas/Numpy/Matplotlib etc. 
explain briefly what it does\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"### Q1: What was the change in price and volume of the stock traded overtime?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"plt.plot(axis2019_df['Date'], axis2019_df['Close'],linewidth=3, label = '2019')\\nplt.plot(axis2020_df[\\\"Date\\\"],axis2020_df['Close'],linewidth=3, label = '2020')\\nplt.plot(axis2021_df[\\\"Date\\\"], axis2021_df['Close'],linewidth = 3, label = '2021')\\nplt.legend(loc='best' )\\nplt.title('Closing Price of stock for the years 2019-2021(Till April)')\\nplt.xlabel(None)\\nplt.ylabel('Closing price of the stock')\",\n \"_____no_output_____\"\n ],\n [\n \"print('The Maximum closing price of the stock during 2019-2021 is',axis_new_df['Close'].max())\\nprint('The Minimum closing price of the stock during 2019-2021 is',axis_new_df['Close'].min())\\nprint('The Index for the Maximum closing price in the dataframe is',getIndexes(axis_new_df, axis_new_df['Close'].max()))\\nprint('The Index for the Minimum closing price in the dataframe is',getIndexes(axis_new_df, axis_new_df['Close'].min()))\\nprint(axis_new_df.iloc[104])\\nprint(axis_new_df.iloc[303])\\n\",\n \"The Maximum closing price of the stock during 2019-2021 is 822.8\\nThe Minimum closing price of the stock during 2019-2021 is 303.15\\nThe Index for the Maximum closing price in the dataframe is [(105, 'Prev Close'), (104, 'Close'), (105, 'Daily Lag')]\\nThe Index for the Minimum closing price in the dataframe is [(304, 'Prev Close'), (303, 'Close'), (304, 'Daily Lag')]\\nDate 2019-06-04 00:00:00\\nSymbol AXISBANK\\nPrev Close 812.65\\nOpen 807.55\\nHigh 827.75\\nLow 805.5\\nClose 822.8\\nVolume 9515354\\nTurnover 778700415970000.0\\nDaily Lag 812.65\\nDaily Returns -0.012336\\nYear 2019\\nName: 104, dtype: object\\nDate 2020-03-24 00:00:00\\nSymbol AXISBANK\\nPrev Close 308.65\\nOpen 331.95\\nHigh 337.5\\nLow 291.0\\nClose 303.15\\nVolume 50683611\\nTurnover 1578313503950000.0\\nDaily Lag 308.65\\nDaily Returns 0.018143\\nYear 2020\\nName: 303, dtype: object\\n\"\n ]\n ],\n [\n [\n \"* As we can see from the above one of the two plots there was a dip in the closing price during the year 2020. The Maximum Closing price occurred on 2019-06-04(Close = 822.8). The lowest of closing price during the years occurred on 2020-03-24(Close = 303.15). 
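For locating single extreme values, pandas also offers `idxmax()` and `idxmin()`, which return the row index of the maximum or minimum directly. A minimal sketch against the same `axis_new_df`:

```python
# idxmax()/idxmin() give the index labels of the extreme closing prices,
# so the rows can be pulled out with .loc instead of a custom search helper.
max_idx = axis_new_df['Close'].idxmax()
min_idx = axis_new_df['Close'].idxmin()

print('Highest close:', axis_new_df.loc[max_idx, ['Date', 'Close']].to_dict())
print('Lowest close: ', axis_new_df.loc[min_idx, ['Date', 'Close']].to_dict())
```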
This can say that the start of the pandemic has caused the steep down curve for the stock's closing price.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"plt.plot(axis2019_df[\\\"Date\\\"],axis2019_df[\\\"Volume\\\"],linewidth=2, label = '2019')\\nplt.plot(axis2020_df[\\\"Date\\\"],axis2020_df[\\\"Volume\\\"],linewidth=2, label = '2020')\\nplt.plot(axis2021_df[\\\"Date\\\"],axis2021_df[\\\"Volume\\\"],linewidth=2, label = '2021')\\nplt.legend(loc='best')\\nplt.title('Volume of stock traded in the years 2019-2021(till April)')\\nplt.ylabel('Volume')\\nplt.xlabel(None)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"print('The Maximum volume of the stock traded during 2019-2021 is',axis_new_df['Volume'].max())\\nprint('The Minimum volume of the stock traded during 2019-2021 is',axis_new_df['Volume'].min())\\nprint('The Index for the Maximum volume stock traded in the dataframe is',getIndexes(axis_new_df, axis_new_df['Volume'].max()))\\nprint('The Index for the Minimum volume stock traded in the dataframe is',getIndexes(axis_new_df, axis_new_df['Volume'].min()))\\nprint(axis_new_df.iloc[357])\\nprint(axis_new_df.iloc[200])\",\n \"The Maximum volume of the stock traded during 2019-2021 is 96190274\\nThe Minimum volume of the stock traded during 2019-2021 is 965772\\nThe Index for the Maximum volume stock traded in the dataframe is [(357, 'Volume')]\\nThe Index for the Minimum volume stock traded in the dataframe is [(200, 'Volume')]\\nDate 2020-06-16 00:00:00\\nSymbol AXISBANK\\nPrev Close 389.6\\nOpen 404.9\\nHigh 405.0\\nLow 360.4\\nClose 381.55\\nVolume 96190274\\nTurnover 3654065942305001.0\\nDaily Lag 389.6\\nDaily Returns 0.021098\\nYear 2020\\nName: 357, dtype: object\\nDate 2019-10-27 00:00:00\\nSymbol AXISBANK\\nPrev Close 708.6\\nOpen 711.0\\nHigh 715.05\\nLow 708.55\\nClose 710.1\\nVolume 965772\\nTurnover 68696126654999.992188\\nDaily Lag 708.6\\nDaily Returns -0.002112\\nYear 2019\\nName: 200, dtype: object\\n\"\n ]\n ],\n [\n [\n \"As we can see from the above graph a lot of volume of trade happened during 2020. That means the stock was transacted a lot during the year 2020. The highest Volumed of stock is traded on 2020-06-16(Volume =96190274) and the Minimum volume of the stock traded during 2019-2021 is on 2019-10-27(Volume = 965772)\",\n \"_____no_output_____\"\n ],\n [\n \"### Q2: What was the daily return of the stock on average?\\n\\nThe daily return measures the price change in a stock's price as a percentage of the previous day's closing price. A positive return means the stock has grown in value, while a negative return means it has lost value. 
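Pandas has a built-in for this definition: `pct_change()` computes (today's close minus yesterday's close) divided by yesterday's close. Note that the `Daily Lag / Close - 1` column built earlier divides the other way around, so its sign is roughly flipped for small moves. A short sketch for comparison, with the new column name chosen only for illustration:

```python
# Conventional daily return: (close_t - close_{t-1}) / close_{t-1}
axis_new_df['Daily Returns (pct_change)'] = axis_new_df['Close'].pct_change()

# Compare the two definitions side by side for the first few rows.
print(axis_new_df[['Date', 'Close', 'Daily Returns', 'Daily Returns (pct_change)']].head())
```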
we will also attempt to calculate the maximum daily return of the stock during 2019-2021.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#axis_new_df['Daily Returns'].plot(title='Axis Bank Daily Returns')\\nplt.plot(axis_new_df['Date'],axis_new_df['Daily Returns'], linewidth=2 ,label = 'Daily Returns')\\nplt.legend(loc='best' )\\nplt.title('Daily Returns of stock for the years 2019-2021(Till April)')\\nplt.xlabel(None)\\nplt.ylabel('Daily Returns of the stock')\",\n \"_____no_output_____\"\n ],\n [\n \"plt.plot(axis_new_df['Date'],axis_new_df['Daily Returns'], linestyle='--', marker='o')\\nplt.title('Daily Returns of stock for the years 2019-2021(Till April)')\\nplt.xlabel(None)\\nplt.ylabel('Daily Returns of the stock')\",\n \"_____no_output_____\"\n ],\n [\n \"print('The Maximum daily return during the years 2020 is',axis_new_df['Daily Returns'].max())\\nindex = getIndexes(axis_new_df, axis_new_df['Daily Returns'].max())\\naxis_new_df.iloc[302]\",\n \"The Maximum daily return during the years 2020 is 0.3871699335817269\\n\"\n ],\n [\n \"def getIndexes(dfObj, value):\\n ''' Get index positions of value in dataframe i.e. dfObj.'''\\n listOfPos = list()\\n # Get bool dataframe with True at positions where the given value exists\\n result = dfObj.isin([value])\\n # Get list of columns that contains the value\\n seriesObj = result.any()\\n columnNames = list(seriesObj[seriesObj == True].index)\\n # Iterate over list of columns and fetch the rows indexes where value exists\\n for col in columnNames:\\n rows = list(result[col][result[col] == True].index)\\n for row in rows:\\n listOfPos.append((row, col))\\n # Return a list of tuples indicating the positions of value in the dataframe\\n return listOfPos\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"As we can see from the plot there were high daily returns for the stock around late March 2020 and then there was ups and downs from April- July 2020 . we can see that the most changes in daily returns occurred during April 2020 - July 2020 and at other times the daily returns were almost flat. The maximum daily returns for the stock during 2019-2021 occurred on 2020-03-23(observed from the pandas table above).\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Avgdailyret_2019 =axis2019_df['Daily Returns'].sum()/len(axis2019_df['Daily Returns'])\\nAvgdailyret_2020 =axis2020_df['Daily Returns'].sum()/len(axis2020_df['Daily Returns'])\\nAvgdailyret_2021 =axis2021_df['Daily Returns'].sum()/len(axis2021_df['Daily Returns'])\\n\\n# create a dataset\\ndata_dailyret = {'2019': Avgdailyret_2019, '2020':Avgdailyret_2020, '2021':Avgdailyret_2021}\\nYears = list(data_dailyret.keys())\\nAvgdailyret = list(data_dailyret.values())\\n\\n# plotting a bar chart\\nplt.figure(figsize=(10, 7))\\nplt.bar(Years, Avgdailyret, color ='maroon',width = 0.3)\\nplt.xlabel(\\\"Years\\\")\\nplt.ylabel(\\\"Average Daily Returns of the Stock Traded\\\")\\nplt.title(\\\"Average Daily Returns of the Stock over the years 2019-2021(Till April) (in 10^7)\\\")\\nplt.show()\\n \",\n \"_____no_output_____\"\n ],\n [\n \"plt.figure(figsize=(12, 7))\\nsns.distplot(axis_new_df['Daily Returns'].dropna(), bins=100, color='purple')\\nplt.title(' Histogram of Daily Returns')\\nplt.tight_layout()\",\n \"/opt/conda/lib/python3.9/site-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. 
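The warning here points away from `distplot`; a minimal sketch of the axes-level replacement, assuming a seaborn version (0.11 or newer) where `histplot` is available:

```python
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(12, 7))
# histplot is the axes-level successor to distplot; kde=True overlays a density curve.
sns.histplot(axis_new_df['Daily Returns'].dropna(), bins=100, color='purple', kde=True)
plt.title('Histogram of Daily Returns')
plt.tight_layout()
plt.show()
```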
Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\\n warnings.warn(msg, FutureWarning)\\n\"\n ]\n ],\n [\n [\n \"### Q3: What is the Average Trading volume of the stock for past three years?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Avgvol_2019 =axis2019_df['Volume'].sum()/len(axis2019_df['Volume'])\\nAvgvol_2020 =axis2020_df['Volume'].sum()/len(axis2020_df['Volume'])\\nAvgvol_2021 =axis2021_df['Volume'].sum()/len(axis2021_df['Volume'])\\n# create a dataset\\ndata_volume = {'2019': Avgvol_2019, '2020':Avgvol_2020, '2021':Avgvol_2021}\\nYears = list(data_volume.keys())\\nAvgVol = list(data_volume.values())\\n# plotting a bar chart\\nplt.figure(figsize=(13, 7))\\nplt.bar(Years, AvgVol, color ='maroon',width = 0.3)\\nplt.xlabel(\\\"Years\\\")\\nplt.ylabel(\\\"Average Volume of the Stock Traded\\\")\\nplt.title(\\\"Average Trading volume of the Stock over the years 2019-2021(Till April) (in 10^7)\\\")\\nplt.show()\\n \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"From the above plot we can say that more volume of the Axis Bank stock is traded during the year 2020. We can see a significant rise in the trading volume of the stock from 2019 to 2020. \",\n \"_____no_output_____\"\n ],\n [\n \"### Q4: What is the Average Closing price of the stock for past three years?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Avgclose_2019 =axis2019_df['Close'].sum()/len(axis2019_df['Close'])\\nAvgclose_2020 =axis2020_df['Close'].sum()/len(axis2020_df['Close'])\\nAvgclose_2021 =axis2021_df['Close'].sum()/len(axis2021_df['Close'])\\n# create a dataset\\ndata_volume = {'2019': Avgclose_2019, '2020':Avgclose_2020, '2021':Avgclose_2021}\\nYears = list(data_volume.keys())\\nAvgClose = list(data_volume.values())\\n# plotting a bar chart\\nplt.figure(figsize=(13, 7))\\nplt.bar(Years, AvgClose, color ='maroon',width = 0.3)\\nplt.xlabel(\\\"Years\\\")\\nplt.ylabel(\\\"Average Closding Price of the Stock Traded\\\")\\nplt.title(\\\"Average Closing price of the Stock over the years 2019-2021(Till April) (in 10^7)\\\")\\nplt.show()\\n \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"We have seen the Trading Volume of the stock is more during the year 2020. In contrast, the Year 2020 has the lowest average closing price among the other two. But for the years 2019 and 2021 the Average closing price is almost same, there is not much change in the value.\",\n \"_____no_output_____\"\n ],\n [\n \" \",\n \"_____no_output_____\"\n ],\n [\n \"Let us save and upload our work to Jovian before continuing.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import jovian\",\n \"_____no_output_____\"\n ],\n [\n \"jovian.commit()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Inferences and Conclusion\\n\\nInferences : The above data analysis is done on the data set of stock quotes for AXIS BANK during the years 2019-2021. From the Analysis we can say that during the year 2020 there has been a lot of unsteady growth, there has been rise in the volume of stock traded on the exchange, that means there has been a lot of transactions of the stock. The stock has seen a swift traffic in buy/sell during the year 2020 and has fallen back to normal in the year 2021. 
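One hedged way to put a number on the volume-versus-price question raised by these two charts is a simple correlation, assuming the columns built earlier in `axis_new_df`:

```python
# Pearson correlation between closing price and traded volume over 2019-2021.
corr_close = axis_new_df['Close'].corr(axis_new_df['Volume'])
# Volume versus the daily *change* in price is usually the more telling comparison.
corr_returns = axis_new_df['Daily Returns'].corr(axis_new_df['Volume'])

print(f'Correlation of Close with Volume:         {corr_close:.3f}')
print(f'Correlation of Daily Returns with Volume: {corr_returns:.3f}')
```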
In contrast to the volume of the stock the closing price of the stock has decreased during the year 2020, which can be concluded as the volume of the stock traded has no relation to the price change of the stock(while most people think there can be a correlation among the two values). The price decrease for the stock may have been due to the pandemic rise in India during the year 2020. \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import jovian\",\n \"_____no_output_____\"\n ],\n [\n \"jovian.commit()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## References and Future Work\\n\\nFuture Ideas for the Analyis:\\n* I am planning to go forward with this basic Analysis of the AXISBANK stock quotes and build a Machine Learning model predicting the future stock prices.\\n* I plan to automate the Data Analysis process for every stock in the NIFTY50 Index by defining reusable functions and automating the Analysis procedures.\\n* Study more strong correlations between the different quotes of the stock and analyze how and why they are related in that fashion. \\n\\nREFRENCES/LINKS USED FOR THIS PROJECT :\\n* https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop.html\\n* https://stackoverflow.com/questions/16683701/in-pandas-how-to-get-the-index-of-a-known-value\\n* https://towardsdatascience.com/working-with-datetime-in-pandas-dataframe-663f7af6c587\\n* https://thispointer.com/python-find-indexes-of-an-element-in-pandas-dataframe/\\n* https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#timeseries-friendly-merging\\n* https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html\\n* https://towardsdatascience.com/financial-analytics-exploratory-data-analysis-of-stock-data-d98cbadf98b9\\n* https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.transpose.html\\n* https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.set_index.html\\n* https://pandas.pydata.org/docs/reference/api/pandas.merge.html\\n* https://stackoverflow.com/questions/14661701/how-to-drop-a-list-of-rows-from-pandas-dataframe\\n* https://www.interviewqs.com/ddi-code-snippets/extract-month-year-pandas\\n* https://stackoverflow.com/questions/18172851/deleting-dataframe-row-in-pandas-based-on-column-value\\n* https://queirozf.com/entries/matplotlib-examples-displaying-and-configuring-legends\\n* https://jakevdp.github.io/PythonDataScienceHandbook/04.06-customizing-legends.html\\n* https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html\\n* https://matplotlib.org/devdocs/gallery/subplots_axes_and_figures/subplots_demo.html\\n* https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html\\n* https://stackoverflow.com/questions/332289/how-do-you-change-the-size-of-figures-drawn-with-matplotlib\\n* https://www.investopedia.com/articles/investing/093014/stock-quotes-explained.asp\\n* https://stackoverflow.com/questions/44908383/how-can-i-group-by-month-from-a-datefield-using-python-pandas\\n* https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.hist.html\\n* https://note.nkmk.me/en/python-pandas-dataframe-rename/\\n* https://stackoverflow.com/questions/24748848/pandas-find-the-maximum-range-in-all-the-columns-of-dataframe\\n* https://stackoverflow.com/questions/29233283/plotting-multiple-lines-in-different-colors-with-pandas-dataframe\\n* https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html\\n* https://www.geeksforgeeks.org/python-pandas-extracting-rows-using-loc/\",\n 
\"_____no_output_____\"\n ]\n ],\n [\n [\n \"import jovian\",\n \"_____no_output_____\"\n ],\n [\n \"jovian.commit()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown","markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code"],["markdown","markdown"],["code"],["markdown"],["code","code","code","code","code","code"],["markdown"],["code","code"],["markdown","markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code","code"],["markdown","markdown","markdown"],["code","code"],["markdown"],["code","code"],["markdown","markdown"],["code","code","code","code"],["markdown"],["code","code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown","markdown","markdown"],["code","code"],["markdown"],["code","code"],["markdown"],["code","code"]],"string":"[\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n 
\"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459073,"cells":{"hexsha":{"kind":"string","value":"e7f04c23bd22fd09e7ab8e2fb08305be76d545b8"},"size":{"kind":"number","value":13941,"string":"13,941"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Array Interview Question.ipynb"},"max_stars_repo_name":{"kind":"string","value":"sillygod/ds_and_algorithm"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4beff02c80220baece8bbfa778586b833fcc6d6f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-04-06T10:06:21.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-04-06T10:06:21.000Z"},"max_issues_repo_path":{"kind":"string","value":"Array Interview Question.ipynb"},"max_issues_repo_name":{"kind":"string","value":"sillygod/ds_and_algorithm"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4beff02c80220baece8bbfa778586b833fcc6d6f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Array Interview Question.ipynb"},"max_forks_repo_name":{"kind":"string","value":"sillygod/ds_and_algorithm"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4beff02c80220baece8bbfa778586b833fcc6d6f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":24.0362068966,"string":"24.036207"},"max_line_length":{"kind":"number","value":193,"string":"193"},"alphanum_fraction":{"kind":"number","value":0.4567821534,"string":"0.456782"},"cells":{"kind":"list like","value":[[["# Array Interview Question\n\n\n### Anagram Check\n\nanagram是一種字的轉換,使用相同的字母以任意順序重新組成不同的字,之中有任意空白都可以例如, \"apple\" -> \"ap e lp\"\n","_____no_output_____"]],[["def anagram(s1, s2):\n l_bound = ord('0')\n r_bound = ord('z')\n appeared = [0]*(r_bound - l_bound)\n \n for letter in s1:\n if letter != ' ':\n mapping = ord(letter) - l_bound\n appeared[mapping] += 1\n\n for letter in s2:\n if letter != ' ':\n mapping = ord(letter) - l_bound\n appeared[mapping] -= 1\n if appeared[mapping] < 0:\n return False\n \n for ele in appeared:\n if ele != 0:\n return False\n \n return True\n","_____no_output_____"],["import unittest\n\n\nclass TestAnagram(unittest.TestCase):\n \n def test(self, solve):\n \n self.assertEqual(solve('go go go','gggooo'), True)\n self.assertEqual(solve('abc','cba'), True)\n self.assertEqual(solve('hi man','hi man'), True)\n self.assertEqual(solve('aabbcc','aabbc'), False)\n self.assertEqual(solve('123','1 2'), False)\n print('success')\n \n\nt = TestAnagram('test') # need to provide the method name, default is runTest\nt.test(anagram)","success\n"]],[["個人這邊這解法可能會不夠完善,因為僅僅是針對魚數字字母的陣列mapping,但是萬一有符號就不知道要怎辦了,所以當然是可以用dict來解掉這煩人的問題拉,只是想說這是屬於array類別的問題,就故意只用array解","_____no_output_____"],["### Array Pair Sum\n\n給予一個數字陣列,找出所有特定的數字配對的加起來為特定值k\nex.\n\n```python\n\npair_sum([1,3,2,2], 
4)\n\n(1,3)\n(2,2)\n\n今天是要回傳有幾個配對就好,所以是回傳數字2\n```","_____no_output_____"]],[["def pair_sum(arr,k):\n res = [False]*len(arr)\n \n for i in range(len(arr)-1):\n for j in range(i+1,len(arr)):\n if arr[i] + arr[j] == k:\n res[i] = True\n res[j] = True\n \n pair_count = [1 for ele in res if ele == True]\n \n return len(pair_count)//2","_____no_output_____"]],[["上面效率會是$ Big O(n^2) $,但是如果可以使用dict或是set的話,就可以把效率壓到 $ BigO(n) $,因為 `n in dict` 這樣的查找只需 $ BigO(1) $,在array找尋你要的值是要花費 $ BigO(n) $,下面我們就來換成用set or dict來實作","_____no_output_____"]],[["def pair_sum_set_version(arr, k):\n to_seek = set()\n output = set()\n \n for num in arr:\n \n target = k - num\n \n if target not in to_seek:\n to_seek.add(num)\n else:\n output.add((min(num, target), max(num, target)))\n \n return len(output)","_____no_output_____"],["class TestPairSum(unittest.TestCase):\n \n def test(self, solve):\n \n self.assertEqual(solve([1,9,2,8,3,7,4,6,5,5,13,14,11,13,-1],10),6)\n self.assertEqual(solve([1,2,3,1],3),1)\n self.assertEqual(solve([1,3,2,2],4),2)\n print('success')\n \nt = TestPairSum()\nt.test(pair_sum_set_version)","success\n"]],[["### finding missing element\n\n這題是會給予你兩個array,第二個array是從第一個array隨機刪除一個元素後,並且進行洗亂的動作,然後今天你的任務就是要去找那個消失的元素","_____no_output_____"]],[["def finder(ary, ary2):\n table = {}\n \n for ele in ary:\n if ele in table:\n table[ele] += 1\n else:\n table[ele] = 1\n \n for ele in ary2:\n if ele in table:\n table[ele] -= 1\n else:\n return ele\n \n for k, v in table.items():\n if v != 0:\n return k","_____no_output_____"]],[["上面這個邏輯,如果是先用ary2去做表紀錄的話邏輯上會更加簡潔,也會少了最後一步\n\n```python\n\nfor ele in ary2:\n table[ele] = 1\n\nfor ele in ary1:\n if (ele not in table) or (table[ele] == 0):\n return ele\n else:\n table[ele] -= 1\n\n```\n\n這個解法算是最快的,因為如果使用排序的話最少都會要 $ n \\log n $,排序就是loop他去找不一樣的元素而已。\n\n\n另外有個天殺的聰明解法,這我真的沒想到就是使用XOR,讓我們先來看看code\nxor ( exclude or ) 具有排他性的or,就是or只要兩者之一有true結果就會是true,但是兩個都是true對於程式會是一種ambiguous,因此exclude這種情況,所以xor就是one or the other but not both\n\n\n$ A \\vee B $ but not $ A \\wedge B $\n\n直接從語意上翻譯成數學就是像下面\n\n$$ A \\oplus B = (A \\vee B) \\wedge \\neg ( A \\wedge B) $$\n\n\n總之呢! 
因為xor的特性,若是兩個完全一樣的ary,你將會發現最後結果會是0\n\n```python\n\ndef finder_xor(arr1, arr2): \n result=0 \n \n # Perform an XOR between the numbers in the arrays\n for num in arr1+arr2: \n result^=num \n print result\n \n return result \n \n```\n\n","_____no_output_____"]],[["class TestFinder(unittest.TestCase):\n \n def test(self, solve):\n \n self.assertEqual(solve([5,5,7,7],[5,7,7]),5)\n self.assertEqual(solve([1,2,3,4,5,6,7],[3,7,2,1,4,6]),5)\n self.assertEqual(solve([9,8,7,6,5,4,3,2,1],[9,8,7,5,4,3,2,1]),6)\n print('success')\n \nt = TestFinder()\nt.test(finder)","success\n"]],[["### largest continuous sum\n\n題目會給予你一個陣列,你的任務就是要去從裡面發現哪種連續數字的總和會是最大值,不一定是全部數字加起來是最大,因為裡面會有負數,有可能是從某某位置開始的連續X個數子總和才是最大。\n","_____no_output_____"]],[["def lar_con_sum(ary):\n \n if len(ary) == 0:\n return 0\n \n max_sum = cur_sum = ary[0]\n \n for num in ary[1:]:\n cur_sum = max(cur_sum+num, num)\n max_sum = max(cur_sum, max_sum)\n \n return max_sum\n \n ","_____no_output_____"]],[["這題的思緒是,長度n的連續數字最大和,一定是從長度n-1連續數字最大和來的\n\n所以今天從index=0時來看,因為元素只有一個這時候就是他本身為最大值,當index=1時,我們就要來比較ele[0]+ele[1]和ele[0] <- 當前最大值的比較,比較這兩者然後取最大的,需要注意的是,我們需要暫存目前的sum,因為這是拿來判斷後面遇到負數狀時況,計算另一個最大值的點,此時另一個最大值(cur_sum)仍然會與之前最大值去比較(max_sum),","_____no_output_____"]],[["class TestLargestConSum(unittest.TestCase):\n \n def test(self, solve):\n \n self.assertEqual(solve([1,2,-1,3,4,-1]),9)\n self.assertEqual(solve([1,2,-1,3,4,10,10,-10,-1]),29)\n self.assertEqual(solve([-1,1]),1)\n self.assertEqual(solve([1,2,-10,5,6]), 11)\n print('success')\n \nt = TestLargestConSum()\nt.test(lar_con_sum)","success\n"]],[["#### Sentence Reversal\n\n給予一個字串,然後反轉單字順序,例如: 'here it is' -> 'is it here'","_____no_output_____"]],[["def sentenceReversal(str1):\n str1 = str1.strip()\n words = str1.split() \n \n result = ''\n \n for i in range(len(words)):\n result += ' '+words[len(words)-i-1]\n \n return result.strip()\n ","_____no_output_____"],["class TestSentenceReversal(unittest.TestCase):\n \n def test(self, solve):\n self.assertEqual(solve(' space before'),'before space')\n self.assertEqual(solve('space after '),'after space')\n self.assertEqual(solve(' Hello John how are you '),'you are how John Hello')\n self.assertEqual(solve('1'),'1')\n print('success')\n \nt = TestSentenceReversal()\nt.test(sentenceReversal)","success\n"]],[["值得注意的是python string split這個方法,不帶參數的話,預設是做strip的事然後分割,跟你使用 split(' ')得到的結果會不一樣,另外面試時可能要使用比較基本的方式來實作這題,也就是少用python trick的方式。","_____no_output_____"],["#### string compression\n\n給予一串字串,轉換成數字加字母的標記法,雖然覺得這個壓縮怪怪的,因為無法保留字母順序","_____no_output_____"]],[["def compression(str1):\n mapping = {}\n letter_order = [False]\n result = ''\n \n for ele in str1:\n if ele != letter_order[-1]:\n letter_order.append(ele)\n \n if ele not in mapping:\n mapping[ele] = 1\n else:\n mapping[ele] += 1\n \n for key in letter_order[1:]:\n result += '{}{}'.format(key, mapping[key])\n \n return result","_____no_output_____"],["class TestCompression(unittest.TestCase):\n \n def test(self, solve):\n self.assertEqual(solve(''), '')\n self.assertEqual(solve('AABBCC'), 'A2B2C2')\n self.assertEqual(solve('AAABCCDDDDD'), 'A3B1C2D5')\n print('success')\n \nt = TestCompression()\nt.test(compression)","success\n"]],[["#### unique characters in string\n\n給予一串字串並判斷他是否全部不同的字母\n","_____no_output_____"]],[["def uni_char(str1):\n mapping = {}\n \n for letter in str1:\n if letter in mapping:\n return False\n else:\n mapping[letter] = True\n \n return True\n\ndef uni_char2(str1):\n return len(set(str1)) == len(str1)","_____no_output_____"],["class TestUniChar(unittest.TestCase):\n \n def test(self, 
solve):\n self.assertEqual(solve(''), True)\n self.assertEqual(solve('goo'), False)\n self.assertEqual(solve('abcdefg'), True)\n print('success')\n \nt = TestUniChar()\nt.test(uni_char2)","success\n"]]],"string":"[\n [\n [\n \"# Array Interview Question\\n\\n\\n### Anagram Check\\n\\nanagram是一種字的轉換,使用相同的字母以任意順序重新組成不同的字,之中有任意空白都可以例如, \\\"apple\\\" -> \\\"ap e lp\\\"\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def anagram(s1, s2):\\n l_bound = ord('0')\\n r_bound = ord('z')\\n appeared = [0]*(r_bound - l_bound)\\n \\n for letter in s1:\\n if letter != ' ':\\n mapping = ord(letter) - l_bound\\n appeared[mapping] += 1\\n\\n for letter in s2:\\n if letter != ' ':\\n mapping = ord(letter) - l_bound\\n appeared[mapping] -= 1\\n if appeared[mapping] < 0:\\n return False\\n \\n for ele in appeared:\\n if ele != 0:\\n return False\\n \\n return True\\n\",\n \"_____no_output_____\"\n ],\n [\n \"import unittest\\n\\n\\nclass TestAnagram(unittest.TestCase):\\n \\n def test(self, solve):\\n \\n self.assertEqual(solve('go go go','gggooo'), True)\\n self.assertEqual(solve('abc','cba'), True)\\n self.assertEqual(solve('hi man','hi man'), True)\\n self.assertEqual(solve('aabbcc','aabbc'), False)\\n self.assertEqual(solve('123','1 2'), False)\\n print('success')\\n \\n\\nt = TestAnagram('test') # need to provide the method name, default is runTest\\nt.test(anagram)\",\n \"success\\n\"\n ]\n ],\n [\n [\n \"個人這邊這解法可能會不夠完善,因為僅僅是針對魚數字字母的陣列mapping,但是萬一有符號就不知道要怎辦了,所以當然是可以用dict來解掉這煩人的問題拉,只是想說這是屬於array類別的問題,就故意只用array解\",\n \"_____no_output_____\"\n ],\n [\n \"### Array Pair Sum\\n\\n給予一個數字陣列,找出所有特定的數字配對的加起來為特定值k\\nex.\\n\\n```python\\n\\npair_sum([1,3,2,2], 4)\\n\\n(1,3)\\n(2,2)\\n\\n今天是要回傳有幾個配對就好,所以是回傳數字2\\n```\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def pair_sum(arr,k):\\n res = [False]*len(arr)\\n \\n for i in range(len(arr)-1):\\n for j in range(i+1,len(arr)):\\n if arr[i] + arr[j] == k:\\n res[i] = True\\n res[j] = True\\n \\n pair_count = [1 for ele in res if ele == True]\\n \\n return len(pair_count)//2\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"上面效率會是$ Big O(n^2) $,但是如果可以使用dict或是set的話,就可以把效率壓到 $ BigO(n) $,因為 `n in dict` 這樣的查找只需 $ BigO(1) $,在array找尋你要的值是要花費 $ BigO(n) $,下面我們就來換成用set or dict來實作\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def pair_sum_set_version(arr, k):\\n to_seek = set()\\n output = set()\\n \\n for num in arr:\\n \\n target = k - num\\n \\n if target not in to_seek:\\n to_seek.add(num)\\n else:\\n output.add((min(num, target), max(num, target)))\\n \\n return len(output)\",\n \"_____no_output_____\"\n ],\n [\n \"class TestPairSum(unittest.TestCase):\\n \\n def test(self, solve):\\n \\n self.assertEqual(solve([1,9,2,8,3,7,4,6,5,5,13,14,11,13,-1],10),6)\\n self.assertEqual(solve([1,2,3,1],3),1)\\n self.assertEqual(solve([1,3,2,2],4),2)\\n print('success')\\n \\nt = TestPairSum()\\nt.test(pair_sum_set_version)\",\n \"success\\n\"\n ]\n ],\n [\n [\n \"### finding missing element\\n\\n這題是會給予你兩個array,第二個array是從第一個array隨機刪除一個元素後,並且進行洗亂的動作,然後今天你的任務就是要去找那個消失的元素\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def finder(ary, ary2):\\n table = {}\\n \\n for ele in ary:\\n if ele in table:\\n table[ele] += 1\\n else:\\n table[ele] = 1\\n \\n for ele in ary2:\\n if ele in table:\\n table[ele] -= 1\\n else:\\n return ele\\n \\n for k, v in table.items():\\n if v != 0:\\n return k\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"上面這個邏輯,如果是先用ary2去做表紀錄的話邏輯上會更加簡潔,也會少了最後一步\\n\\n```python\\n\\nfor ele in ary2:\\n table[ele] = 1\\n\\nfor ele in ary1:\\n if (ele not in table) 
or (table[ele] == 0):\\n return ele\\n else:\\n table[ele] -= 1\\n\\n```\\n\\n這個解法算是最快的,因為如果使用排序的話最少都會要 $ n \\\\log n $,排序就是loop他去找不一樣的元素而已。\\n\\n\\n另外有個天殺的聰明解法,這我真的沒想到就是使用XOR,讓我們先來看看code\\nxor ( exclude or ) 具有排他性的or,就是or只要兩者之一有true結果就會是true,但是兩個都是true對於程式會是一種ambiguous,因此exclude這種情況,所以xor就是one or the other but not both\\n\\n\\n$ A \\\\vee B $ but not $ A \\\\wedge B $\\n\\n直接從語意上翻譯成數學就是像下面\\n\\n$$ A \\\\oplus B = (A \\\\vee B) \\\\wedge \\\\neg ( A \\\\wedge B) $$\\n\\n\\n總之呢! 因為xor的特性,若是兩個完全一樣的ary,你將會發現最後結果會是0\\n\\n```python\\n\\ndef finder_xor(arr1, arr2): \\n result=0 \\n \\n # Perform an XOR between the numbers in the arrays\\n for num in arr1+arr2: \\n result^=num \\n print result\\n \\n return result \\n \\n```\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"class TestFinder(unittest.TestCase):\\n \\n def test(self, solve):\\n \\n self.assertEqual(solve([5,5,7,7],[5,7,7]),5)\\n self.assertEqual(solve([1,2,3,4,5,6,7],[3,7,2,1,4,6]),5)\\n self.assertEqual(solve([9,8,7,6,5,4,3,2,1],[9,8,7,5,4,3,2,1]),6)\\n print('success')\\n \\nt = TestFinder()\\nt.test(finder)\",\n \"success\\n\"\n ]\n ],\n [\n [\n \"### largest continuous sum\\n\\n題目會給予你一個陣列,你的任務就是要去從裡面發現哪種連續數字的總和會是最大值,不一定是全部數字加起來是最大,因為裡面會有負數,有可能是從某某位置開始的連續X個數子總和才是最大。\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def lar_con_sum(ary):\\n \\n if len(ary) == 0:\\n return 0\\n \\n max_sum = cur_sum = ary[0]\\n \\n for num in ary[1:]:\\n cur_sum = max(cur_sum+num, num)\\n max_sum = max(cur_sum, max_sum)\\n \\n return max_sum\\n \\n \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"這題的思緒是,長度n的連續數字最大和,一定是從長度n-1連續數字最大和來的\\n\\n所以今天從index=0時來看,因為元素只有一個這時候就是他本身為最大值,當index=1時,我們就要來比較ele[0]+ele[1]和ele[0] <- 當前最大值的比較,比較這兩者然後取最大的,需要注意的是,我們需要暫存目前的sum,因為這是拿來判斷後面遇到負數狀時況,計算另一個最大值的點,此時另一個最大值(cur_sum)仍然會與之前最大值去比較(max_sum),\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"class TestLargestConSum(unittest.TestCase):\\n \\n def test(self, solve):\\n \\n self.assertEqual(solve([1,2,-1,3,4,-1]),9)\\n self.assertEqual(solve([1,2,-1,3,4,10,10,-10,-1]),29)\\n self.assertEqual(solve([-1,1]),1)\\n self.assertEqual(solve([1,2,-10,5,6]), 11)\\n print('success')\\n \\nt = TestLargestConSum()\\nt.test(lar_con_sum)\",\n \"success\\n\"\n ]\n ],\n [\n [\n \"#### Sentence Reversal\\n\\n給予一個字串,然後反轉單字順序,例如: 'here it is' -> 'is it here'\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def sentenceReversal(str1):\\n str1 = str1.strip()\\n words = str1.split() \\n \\n result = ''\\n \\n for i in range(len(words)):\\n result += ' '+words[len(words)-i-1]\\n \\n return result.strip()\\n \",\n \"_____no_output_____\"\n ],\n [\n \"class TestSentenceReversal(unittest.TestCase):\\n \\n def test(self, solve):\\n self.assertEqual(solve(' space before'),'before space')\\n self.assertEqual(solve('space after '),'after space')\\n self.assertEqual(solve(' Hello John how are you '),'you are how John Hello')\\n self.assertEqual(solve('1'),'1')\\n print('success')\\n \\nt = TestSentenceReversal()\\nt.test(sentenceReversal)\",\n \"success\\n\"\n ]\n ],\n [\n [\n \"值得注意的是python string split這個方法,不帶參數的話,預設是做strip的事然後分割,跟你使用 split(' ')得到的結果會不一樣,另外面試時可能要使用比較基本的方式來實作這題,也就是少用python trick的方式。\",\n \"_____no_output_____\"\n ],\n [\n \"#### string compression\\n\\n給予一串字串,轉換成數字加字母的標記法,雖然覺得這個壓縮怪怪的,因為無法保留字母順序\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def compression(str1):\\n mapping = {}\\n letter_order = [False]\\n result = ''\\n \\n for ele in str1:\\n if ele != letter_order[-1]:\\n letter_order.append(ele)\\n \\n if ele not in mapping:\\n mapping[ele] = 1\\n else:\\n 
mapping[ele] += 1\\n \\n for key in letter_order[1:]:\\n result += '{}{}'.format(key, mapping[key])\\n \\n return result\",\n \"_____no_output_____\"\n ],\n [\n \"class TestCompression(unittest.TestCase):\\n \\n def test(self, solve):\\n self.assertEqual(solve(''), '')\\n self.assertEqual(solve('AABBCC'), 'A2B2C2')\\n self.assertEqual(solve('AAABCCDDDDD'), 'A3B1C2D5')\\n print('success')\\n \\nt = TestCompression()\\nt.test(compression)\",\n \"success\\n\"\n ]\n ],\n [\n [\n \"#### unique characters in string\\n\\n給予一串字串並判斷他是否全部不同的字母\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def uni_char(str1):\\n mapping = {}\\n \\n for letter in str1:\\n if letter in mapping:\\n return False\\n else:\\n mapping[letter] = True\\n \\n return True\\n\\ndef uni_char2(str1):\\n return len(set(str1)) == len(str1)\",\n \"_____no_output_____\"\n ],\n [\n \"class TestUniChar(unittest.TestCase):\\n \\n def test(self, solve):\\n self.assertEqual(solve(''), True)\\n self.assertEqual(solve('goo'), False)\\n self.assertEqual(solve('abcdefg'), True)\\n print('success')\\n \\nt = TestUniChar()\\nt.test(uni_char2)\",\n \"success\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code"],["markdown","markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown","markdown"],["code","code"],["markdown"],["code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1459074,"cells":{"hexsha":{"kind":"string","value":"e7f05da0a6507fbb7c68d9f152ad46ec65580eda"},"size":{"kind":"number","value":13614,"string":"13,614"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb"},"max_stars_repo_name":{"kind":"string","value":"armhzjz/deep-learning-v2-pytorch"},"max_stars_repo_head_hexsha":{"kind":"string","value":"cedd30851aba8241a76d5278ce69286058d99fb1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb"},"max_issues_repo_name":{"kind":"string","value":"armhzjz/deep-learning-v2-pytorch"},"max_issues_repo_head_hexsha":{"kind":"string","value":"cedd30851aba8241a76d5278ce69286058d99fb1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb"},"max_forks_repo_name":{"kind":"string","value":"armhzjz/deep-learning-v2-pytorch"},"max_forks_repo_head_hexsha":{"kind":"string","value":"cedd30851aba8241a76d5278ce69286058d99fb1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":34.4658227848,"string":"34.465823"},"max_line_length":{"kind":"number","value":349,"string":"349"},"alphanum_fraction":{"kind":"number","value":0.572572352,"string":"0.572572"},"cells":{"kind":"list like","value":[[["# Multi-Layer Perceptron, MNIST\n---\nIn this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database.\n\nThe process will be broken down into the following steps:\n>1. Load and visualize the data\n2. Define a neural network\n3. Train the model\n4. Evaluate the performance of our trained model on a test dataset!\n\nBefore we begin, we have to import the necessary libraries for working with data and PyTorch.","_____no_output_____"]],[["# import libraries\nimport torch\nimport numpy as np","_____no_output_____"]],[["---\n## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)\n\nDownloading may take a few moments, and you should see your progress as the data is loading. 
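Step 2 of the outline above asks for a network definition; a minimal sketch of the kind of MLP this exercise builds toward, where the class name, hidden sizes and dropout rate are illustrative assumptions rather than the notebook's required architecture:

```python
import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    """Two-hidden-layer MLP for 28x28 MNIST digits (layer sizes are assumptions)."""
    def __init__(self, hidden_1=512, hidden_2=256):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, hidden_1)   # flattened image -> first hidden layer
        self.fc2 = nn.Linear(hidden_1, hidden_2)  # first -> second hidden layer
        self.fc3 = nn.Linear(hidden_2, 10)        # second hidden layer -> 10 digit classes
        self.dropout = nn.Dropout(0.2)            # light regularization between layers

    def forward(self, x):
        x = x.view(x.size(0), -1)                 # flatten each image to a 784-dim vector
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        return self.fc3(x)                        # raw scores; pair with nn.CrossEntropyLoss
```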
You may also choose to change the `batch_size` if you want to load more data at a time.\n\nThis cell will create DataLoaders for each of our datasets.","_____no_output_____"]],[["from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.MNIST(root='data', train=False,\n download=True, transform=transform)\n\n# prepare data loaders\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)","_____no_output_____"]],[["### Visualize a Batch of Training Data\n\nThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.","_____no_output_____"]],[["import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n ax.set_title(str(labels[idx].item()))","_____no_output_____"]],[["### View an Image in More Detail","_____no_output_____"]],[["img = np.squeeze(images[1])\n\nfig = plt.figure(figsize = (12,12)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')\nwidth, height = img.shape\nthresh = img.max()/2.5\nfor x in range(width):\n for y in range(height):\n val = round(img[x][y],2) if img[x][y] !=0 else 0\n ax.annotate(str(val), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]