{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n\n","_____no_output_____"]],[["from __future__ import print_function\nimport numpy as np\nimport pandas as pd\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets","_____no_output_____"]],[["## Probabilités - approche fréquentiste\n### Définition par la fréquence relative :\n* une expérience d’ensemble fondamental est exécutée plusieurs fois sous les mêmes conditions.\n* Pour chaque événement E de , n(E) est le nombre de fois où l’événement E survient lors des n premières répétitions de l’expérience.\n* P(E), la probabilité de l’événement E est définie de la manière suivante :\n\n$$P(E)=\\lim_{n\\to\\infty}\\dfrac{n(E)}{n} $$ ","_____no_output_____"],["## Simulation d'un dé parfait","_____no_output_____"]],[["# seed the random number generator\nnp.random.seed(1)\n\n# Example: sampling \n#\n# do not forget that Python arrays are zero-indexed,\n# and the 2nd argument to NumPy arange must be incremented by 1\n# if you want to include that value\nn = 6\nk = 200000\nT=np.random.choice(np.arange(1, n+1), k, replace=True)\nunique, counts = np.unique(T, return_counts=True)\ndic=dict(zip(unique, counts))\ndf=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\ndf.set_index(['i'], inplace=True)\ndf['Freq']=df['Occurence']/k\ndf['P({i})']='{}'.format(1/6)\ndf","_____no_output_____"]],[["## Ajouter de l'intéraction ","_____no_output_____"]],[["def dice_sim(k=100):\n n = 6 \n T=np.random.choice(np.arange(1, n+1), k, replace=True)\n unique, counts = np.unique(T, return_counts=True)\n dic=dict(zip(unique, counts))\n df=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\n df.set_index(['i'], inplace=True)\n df['Freq']=df['Occurence']/k\n df['P({i})']='{0:.3f}'.format(1/6)\n return df\n ","_____no_output_____"],["dice_sim(100)","_____no_output_____"],["interact(dice_sim,k=widgets.IntSlider(min=1000, max=50000, step=500, value=10));","_____no_output_____"]],[["## Cas d'un dé truqué","_____no_output_____"]],[["p=[0.1, 0.1, 0.1, 0.1,0.1,0.5]\nsum(p)","_____no_output_____"],["def dice_sim(k=100,q=[[0.1, 0.1, 0.1, 0.1,0.1,0.5],[0.2, 0.1, 0.2, 0.1,0.1,0.3]]):\n n = 6\n qq=q\n T=np.random.choice(np.arange(1, n+1), k, replace=True,p=qq)\n unique, counts = np.unique(T, return_counts=True)\n dic=dict(zip(unique, counts))\n df=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\n df.set_index(['i'], inplace=True)\n df['Freq']=df['Occurence']/k\n df['P({i})']=['{0:.3f}'.format(j) for j in q]\n return df","_____no_output_____"],["interact(dice_sim,k=widgets.IntSlider(min=1000, max=50000, step=500, value=10));","_____no_output_____"]],[["## Exercice 1: \n\nTester l'intéraction précédente pour plusieurs valeurs de `p`\nDonner votre conclusion :","_____no_output_____"]],[["# Conclusion \n\n\n\n\n","_____no_output_____"]],[["## Permutation Aléatoire","_____no_output_____"]],[["np.random.seed(2)\n\nm = 1\nn = 10\n\nv = np.arange(m, n+1)\nprint('v =', v)\n\nnp.random.shuffle(v)\nprint('v, shuffled =', v)","v = [ 1 2 3 4 5 6 7 8 9 10]\nv, shuffled = [ 5 2 6 1 8 3 4 7 10 9]\n"]],[["## Exercice 2\nVérifier que les permutation aléatoires sont uniforme , c'est à dire que la probabilité de générer une permutation d'élement de {1,2,3} est 1/6.\nEn effet les permutations de {1,2,3} sont :\n* 1 2 3\n* 1 3 2\n* 2 1 3\n* 2 3 1\n* 3 1 2\n* 3 2 1\n","_____no_output_____"]],[["k =10\nm = 1\nn = 3\nv = np.arange(m, n+1)\nT=[]\nfor i in range(k):\n np.random.shuffle(v)\n w=np.copy(v)\n T.append(w)","_____no_output_____"],["TT=[str(i) for i in T]\nTT","_____no_output_____"],["k =1000\nm = 1\nn = 3\nv = np.arange(m, n+1)\nT=[]\nfor i in range(k):\n np.random.shuffle(v)\n w=np.copy(v)\n T.append(w)\n\nTT=[str(i) for i in T]\nunique, counts = np.unique(TT, return_counts=True)\ndic=dict(zip(unique, counts))\ndf=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\ndf.set_index(['i'], inplace=True)\ndf['Freq']=df['Occurence']/k\ndf['P({i,j,k})']='{0:.3f}'.format(1/6)\ndf","_____no_output_____"]],[["### Donner votre conclusion en expliquant le script ","_____no_output_____"]],[["## Explication \n\n\n\n\n\n\n\n\n","_____no_output_____"]],[["## Probabilité conditionnelle ","_____no_output_____"],["Rappelons que l'interprétation fréquentiste de la probabilité conditionnelle basée sur un grand nombre `n` de répétitions d'une expérience est $ P (A | B) ≈ n_ {AB} / n_ {B} $, où $ n_ {AB} $ est le nombre de fois où $ A \\cap B $ se produit et $ n_ {B} $ est le nombre de fois où $ B $ se produit. Essayons cela par simulation et vérifions les résultats de l'exemple 2.2.5. Utilisons donc [`numpy.random.choice`] (https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.choice.html) pour simuler les familles` n`, chacun avec deux enfants.\n","_____no_output_____"]],[["np.random.seed(34)\n\nn = 10**5\nchild1 = np.random.choice([1,2], n, replace=True) \nchild2 = np.random.choice([1,2], n, replace=True) \n\nprint('child1:\\n{}\\n'.format(child1))\n\nprint('child2:\\n{}\\n'.format(child2))","child1:\n[2 1 1 ... 1 2 1]\n\nchild2:\n[2 2 2 ... 2 2 1]\n\n"]],[["Ici, «child1» est un «tableau NumPy» de longueur «n», où chaque élément est un 1 ou un 2. En laissant 1 pour «fille» et 2 pour «garçon», ce «tableau» représente le sexe du enfant aîné dans chacune des familles «n». De même, «enfant2» représente le sexe du plus jeune enfant de chaque famille.\n","_____no_output_____"]],[["np.random.choice([\"girl\", \"boy\"], n, replace=True)","_____no_output_____"]],[["mais il est plus pratique de travailler avec des valeurs numériques.\n\nSoit $ A $ l'événement où les deux enfants sont des filles et $ B $ l'événement où l'aîné est une fille. Suivant l'interprétation fréquentiste, nous comptons le nombre de répétitions où $ B $ s'est produit et le nommons `n_b`, et nous comptons également le nombre de répétitions où $ A \\cap B $ s'est produit et le nommons` n_ab`. Enfin, nous divisons `n_ab` par` n_b` pour approximer $ P (A | B) $.","_____no_output_____"]],[["n_b = np.sum(child1==1)\nn_ab = np.sum((child1==1) & (child2==1))\n\nprint('P(both girls | elder is girl) = {:0.2F}'.format(n_ab / n_b))","P(both girls | elder is girl) = 0.50\n"]],[["L'esperluette `&` est un élément par élément $ AND $, donc `n_ab` est le nombre de familles où le premier et le deuxième enfant sont des filles. Lorsque nous avons exécuté ce code, nous avons obtenu 0,50, confirmant notre réponse $ P (\\text {les deux filles | l'aîné est une fille}) = 1/2 $.\n\nSoit maintenant $ A $ l'événement où les deux enfants sont des filles et $ B $ l'événement selon lequel au moins l'un des enfants est une fille. Alors $ A \\cap B $ est le même, mais `n_b` doit compter le nombre de familles où au moins un enfant est une fille. Ceci est accompli avec l'opérateur élémentaire $ OR $ `|` (ce n'est pas une barre de conditionnement; c'est un $ OR $ inclusif, retournant `True` si au moins un élément est` True`).","_____no_output_____"]],[["n_b = np.sum((child1==1) | (child2==2))\nn_ab = np.sum((child1==1) & (child2==1))\n\nprint('P(both girls | at least one girl) = {:0.2F}'.format(n_ab / n_b))","P(both girls | at least one girl) = 0.33\n"]],[["Pour nous, le résultat était de 0,33, confirmant que $ P (\\text {les deux filles | au moins une fille}) = 1/3 $.","_____no_output_____"]]],"string":"[\n [\n [\n \"\\n# Nom et prénom\\n# TP1 Probabilité et statistique\\n\\n \\n HTML Base Tag Example\\n \\n \\n \\n \\\"Logo\\n \\n\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from __future__ import print_function\\nimport numpy as np\\nimport pandas as pd\\nfrom ipywidgets import interact, interactive, fixed, interact_manual\\nimport ipywidgets as widgets\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Probabilités - approche fréquentiste\\n### Définition par la fréquence relative :\\n* une expérience d’ensemble fondamental est exécutée plusieurs fois sous les mêmes conditions.\\n* Pour chaque événement E de , n(E) est le nombre de fois où l’événement E survient lors des n premières répétitions de l’expérience.\\n* P(E), la probabilité de l’événement E est définie de la manière suivante :\\n\\n$$P(E)=\\\\lim_{n\\\\to\\\\infty}\\\\dfrac{n(E)}{n} $$ \",\n \"_____no_output_____\"\n ],\n [\n \"## Simulation d'un dé parfait\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# seed the random number generator\\nnp.random.seed(1)\\n\\n# Example: sampling \\n#\\n# do not forget that Python arrays are zero-indexed,\\n# and the 2nd argument to NumPy arange must be incremented by 1\\n# if you want to include that value\\nn = 6\\nk = 200000\\nT=np.random.choice(np.arange(1, n+1), k, replace=True)\\nunique, counts = np.unique(T, return_counts=True)\\ndic=dict(zip(unique, counts))\\ndf=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\\ndf.set_index(['i'], inplace=True)\\ndf['Freq']=df['Occurence']/k\\ndf['P({i})']='{}'.format(1/6)\\ndf\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Ajouter de l'intéraction \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def dice_sim(k=100):\\n n = 6 \\n T=np.random.choice(np.arange(1, n+1), k, replace=True)\\n unique, counts = np.unique(T, return_counts=True)\\n dic=dict(zip(unique, counts))\\n df=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\\n df.set_index(['i'], inplace=True)\\n df['Freq']=df['Occurence']/k\\n df['P({i})']='{0:.3f}'.format(1/6)\\n return df\\n \",\n \"_____no_output_____\"\n ],\n [\n \"dice_sim(100)\",\n \"_____no_output_____\"\n ],\n [\n \"interact(dice_sim,k=widgets.IntSlider(min=1000, max=50000, step=500, value=10));\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Cas d'un dé truqué\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"p=[0.1, 0.1, 0.1, 0.1,0.1,0.5]\\nsum(p)\",\n \"_____no_output_____\"\n ],\n [\n \"def dice_sim(k=100,q=[[0.1, 0.1, 0.1, 0.1,0.1,0.5],[0.2, 0.1, 0.2, 0.1,0.1,0.3]]):\\n n = 6\\n qq=q\\n T=np.random.choice(np.arange(1, n+1), k, replace=True,p=qq)\\n unique, counts = np.unique(T, return_counts=True)\\n dic=dict(zip(unique, counts))\\n df=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\\n df.set_index(['i'], inplace=True)\\n df['Freq']=df['Occurence']/k\\n df['P({i})']=['{0:.3f}'.format(j) for j in q]\\n return df\",\n \"_____no_output_____\"\n ],\n [\n \"interact(dice_sim,k=widgets.IntSlider(min=1000, max=50000, step=500, value=10));\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Exercice 1: \\n\\nTester l'intéraction précédente pour plusieurs valeurs de `p`\\nDonner votre conclusion :\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Conclusion \\n\\n\\n\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Permutation Aléatoire\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"np.random.seed(2)\\n\\nm = 1\\nn = 10\\n\\nv = np.arange(m, n+1)\\nprint('v =', v)\\n\\nnp.random.shuffle(v)\\nprint('v, shuffled =', v)\",\n \"v = [ 1 2 3 4 5 6 7 8 9 10]\\nv, shuffled = [ 5 2 6 1 8 3 4 7 10 9]\\n\"\n ]\n ],\n [\n [\n \"## Exercice 2\\nVérifier que les permutation aléatoires sont uniforme , c'est à dire que la probabilité de générer une permutation d'élement de {1,2,3} est 1/6.\\nEn effet les permutations de {1,2,3} sont :\\n* 1 2 3\\n* 1 3 2\\n* 2 1 3\\n* 2 3 1\\n* 3 1 2\\n* 3 2 1\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"k =10\\nm = 1\\nn = 3\\nv = np.arange(m, n+1)\\nT=[]\\nfor i in range(k):\\n np.random.shuffle(v)\\n w=np.copy(v)\\n T.append(w)\",\n \"_____no_output_____\"\n ],\n [\n \"TT=[str(i) for i in T]\\nTT\",\n \"_____no_output_____\"\n ],\n [\n \"k =1000\\nm = 1\\nn = 3\\nv = np.arange(m, n+1)\\nT=[]\\nfor i in range(k):\\n np.random.shuffle(v)\\n w=np.copy(v)\\n T.append(w)\\n\\nTT=[str(i) for i in T]\\nunique, counts = np.unique(TT, return_counts=True)\\ndic=dict(zip(unique, counts))\\ndf=pd.DataFrame(list(dic.items()),columns=['i','Occurence'])\\ndf.set_index(['i'], inplace=True)\\ndf['Freq']=df['Occurence']/k\\ndf['P({i,j,k})']='{0:.3f}'.format(1/6)\\ndf\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Donner votre conclusion en expliquant le script \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Explication \\n\\n\\n\\n\\n\\n\\n\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Probabilité conditionnelle \",\n \"_____no_output_____\"\n ],\n [\n \"Rappelons que l'interprétation fréquentiste de la probabilité conditionnelle basée sur un grand nombre `n` de répétitions d'une expérience est $ P (A | B) ≈ n_ {AB} / n_ {B} $, où $ n_ {AB} $ est le nombre de fois où $ A \\\\cap B $ se produit et $ n_ {B} $ est le nombre de fois où $ B $ se produit. Essayons cela par simulation et vérifions les résultats de l'exemple 2.2.5. Utilisons donc [`numpy.random.choice`] (https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.choice.html) pour simuler les familles` n`, chacun avec deux enfants.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"np.random.seed(34)\\n\\nn = 10**5\\nchild1 = np.random.choice([1,2], n, replace=True) \\nchild2 = np.random.choice([1,2], n, replace=True) \\n\\nprint('child1:\\\\n{}\\\\n'.format(child1))\\n\\nprint('child2:\\\\n{}\\\\n'.format(child2))\",\n \"child1:\\n[2 1 1 ... 1 2 1]\\n\\nchild2:\\n[2 2 2 ... 2 2 1]\\n\\n\"\n ]\n ],\n [\n [\n \"Ici, «child1» est un «tableau NumPy» de longueur «n», où chaque élément est un 1 ou un 2. En laissant 1 pour «fille» et 2 pour «garçon», ce «tableau» représente le sexe du enfant aîné dans chacune des familles «n». De même, «enfant2» représente le sexe du plus jeune enfant de chaque famille.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"np.random.choice([\\\"girl\\\", \\\"boy\\\"], n, replace=True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"mais il est plus pratique de travailler avec des valeurs numériques.\\n\\nSoit $ A $ l'événement où les deux enfants sont des filles et $ B $ l'événement où l'aîné est une fille. Suivant l'interprétation fréquentiste, nous comptons le nombre de répétitions où $ B $ s'est produit et le nommons `n_b`, et nous comptons également le nombre de répétitions où $ A \\\\cap B $ s'est produit et le nommons` n_ab`. Enfin, nous divisons `n_ab` par` n_b` pour approximer $ P (A | B) $.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"n_b = np.sum(child1==1)\\nn_ab = np.sum((child1==1) & (child2==1))\\n\\nprint('P(both girls | elder is girl) = {:0.2F}'.format(n_ab / n_b))\",\n \"P(both girls | elder is girl) = 0.50\\n\"\n ]\n ],\n [\n [\n \"L'esperluette `&` est un élément par élément $ AND $, donc `n_ab` est le nombre de familles où le premier et le deuxième enfant sont des filles. Lorsque nous avons exécuté ce code, nous avons obtenu 0,50, confirmant notre réponse $ P (\\\\text {les deux filles | l'aîné est une fille}) = 1/2 $.\\n\\nSoit maintenant $ A $ l'événement où les deux enfants sont des filles et $ B $ l'événement selon lequel au moins l'un des enfants est une fille. Alors $ A \\\\cap B $ est le même, mais `n_b` doit compter le nombre de familles où au moins un enfant est une fille. Ceci est accompli avec l'opérateur élémentaire $ OR $ `|` (ce n'est pas une barre de conditionnement; c'est un $ OR $ inclusif, retournant `True` si au moins un élément est` True`).\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"n_b = np.sum((child1==1) | (child2==2))\\nn_ab = np.sum((child1==1) & (child2==1))\\n\\nprint('P(both girls | at least one girl) = {:0.2F}'.format(n_ab / n_b))\",\n \"P(both girls | at least one girl) = 0.33\\n\"\n ]\n ],\n [\n [\n \"Pour nous, le résultat était de 0,33, confirmant que $ P (\\\\text {les deux filles | au moins une fille}) = 1/3 $.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458801,"cells":{"hexsha":{"kind":"string","value":"e7e2ae4085607041ae3e7ad535ae5d06fed1568c"},"size":{"kind":"number","value":45388,"string":"45,388"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"src/math_bot/model/GloVeSiameseLSTM.ipynb"},"max_stars_repo_name":{"kind":"string","value":"AlinGeorgescu/Math-Bot"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bf9d2a9c373bd85574a11c11e33526c46723df01"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-06-01T16:10:18.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-08-06T00:37:14.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/math_bot/model/GloVeSiameseLSTM.ipynb"},"max_issues_repo_name":{"kind":"string","value":"AlinGeorgescu/Math-Bot"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bf9d2a9c373bd85574a11c11e33526c46723df01"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/math_bot/model/GloVeSiameseLSTM.ipynb"},"max_forks_repo_name":{"kind":"string","value":"AlinGeorgescu/Math-Bot"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bf9d2a9c373bd85574a11c11e33526c46723df01"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":36.8110300081,"string":"36.81103"},"max_line_length":{"kind":"number","value":586,"string":"586"},"alphanum_fraction":{"kind":"number","value":0.5748215387,"string":"0.574822"},"cells":{"kind":"list like","value":[[["# [Math-Bot] Siamese LSTM: Detecting duplicates\n\n\n\nAuthor: Alin-Andrei Georgescu 2021\n\nWelcome to my notebook! It explores the Siamese networks applied to natural language processing. The model is intended to detect duplicates, in other words to check if two sentences are similar.\nThe model uses \"Long short-term memory\" (LSTM) neural networks, which are an artificial recurrent neural networks (RNNs). This version uses GloVe pretrained vectors.\n\n## Outline\n\n- [Overview](#0)\n- [Part 1: Importing the Data](#1)\n - [1.1 Loading in the data](#1.1)\n - [1.2 Converting a sentence to a tensor](#1.2)\n - [1.3 Understanding and building the iterator](#1.3)\n- [Part 2: Defining the Siamese model](#2)\n - [2.1 Understanding and building the Siamese Network](#2.1)\n - [2.2 Implementing Hard Negative Mining](#2.2)\n- [Part 3: Training](#3)\n- [Part 4: Evaluation](#4)\n- [Part 5: Making predictions](#5)\n\n\n### Overview\n\nGeneral ideas:\n- Designing a Siamese networks model\n- Implementing the triplet loss\n- Evaluating accuracy\n- Using cosine similarity between the model's outputted vectors\n- Working with Trax and Numpy libraries in Python 3\n\nThe LSTM cell's architecture (source: https://www.researchgate.net/figure/The-structure-of-the-LSTM-unit_fig2_331421650):\n\n\n\n\nI will start by preprocessing the data, then I will build a classifier that will identify whether two sentences are the same or not. \n\n\nI tokenized the data, then split the dataset into training and testing sets. I loaded pretrained GloVe word embeddings and built a sentence's vector by averaging the composing word's vectors. The model takes in the two sentence embeddings, runs them through an LSTM, and then compares the outputs of the two sub networks using cosine similarity.\n\nThis notebook has been built based on Coursera's Natural Language Processing Specialization.\n","_____no_output_____"],["\n# Part 1: Importing the Data\n\n### 1.1 Loading in the data\n\nFirst step in building a model is building a dataset. I used three datasets in building my model:\n- the Quora Question Pairs\n- edited SICK dataset\n- custom Maths duplicates dataset\n\nRun the cell below to import some of the needed packages. ","_____no_output_____"]],[["import os\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\n\nimport numpy as np\nimport pandas as pd\nimport random as rnd\n\n!pip install textcleaner\nimport textcleaner as tc\n\n!pip install trax\nimport trax\nfrom trax import layers as tl\nfrom trax.supervised import training\nfrom trax.fastmath import numpy as fastnp\n\n!pip install gensim\nfrom gensim.models import KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nfrom collections import defaultdict\n\n# set random seeds\nrnd.seed(34)","_____no_output_____"]],[["**Notice that in this notebook Trax's numpy is referred to as `fastnp`, while regular numpy is referred to as `np`.**\n\nNow the dataset and word embeddings will get loaded and the data processed.","_____no_output_____"]],[["data = pd.read_csv(\"data/merged_dataset.csv\", encoding=\"utf-8\")\n\nN = len(data)\nprint(\"Number of sentence pairs: \", N)\n\ndata.head()\n\n!wget -O data/glove.840B.300d.zip nlp.stanford.edu/data/glove.840B.300d.zip\n!unzip -d data data/glove.840B.300d.zip\n!rm data/glove.840B.300d.zip\nvec_model = KeyedVectors.load_word2vec_format(\"data/glove.840B.300d.txt\", binary=False, no_header=True) ","_____no_output_____"]],[["Then I split the data into a train and test set. The test set will be used later to evaluate the model.","_____no_output_____"]],[["N_dups = len(data[data.is_duplicate == 1])\n\n# Take 90% of the duplicates for the train set\nN_train = int(N_dups * 0.9)\nprint(N_train)\n\n# Take the rest of the duplicates for the test set + an equal number of non-dups\nN_test = (N_dups - N_train) * 2\nprint(N_test)\n\ndata_train = data[: N_train]\n# Shuffle the train set\ndata_train = data_train.sample(frac=1)\n\ndata_test = data[N_train : N_train + N_test]\n# Shuffle the test set\ndata_test = data_test.sample(frac=1)\n\nprint(\"Train set: \", len(data_train), \"; Test set: \", len(data_test))\n\n# Remove the unneeded data to some memory\ndel(data)","_____no_output_____"],["S1_train_words = np.array(data_train[\"sentence1\"])\nS2_train_words = np.array(data_train[\"sentence2\"])\n\nS1_test_words = np.array(data_test[\"sentence1\"])\nS2_test_words = np.array(data_test[\"sentence2\"])\ny_test = np.array(data_test[\"is_duplicate\"])\n\ndel(data_train)\ndel(data_test)","_____no_output_____"]],[["Above, you have seen that the model only takes the duplicated sentences for training.\nAll this has a purpose, as the data generator will produce batches $([s1_1, s1_2, s1_3, ...]$, $[s2_1, s2_2,s2_3, ...])$, where $s1_i$ and $s2_k$ are duplicate if and only if $i = k$.\n\nAn example of how the data looks is shown below.","_____no_output_____"]],[["print(\"TRAINING SENTENCES:\\n\")\nprint(\"Sentence 1: \", S1_train_words[0])\nprint(\"Sentence 2: \", S2_train_words[0], \"\\n\")\nprint(\"Sentence 1: \", S1_train_words[5])\nprint(\"Sentence 2: \", S2_train_words[5], \"\\n\")\n\nprint(\"TESTING SENTENCES:\\n\")\nprint(\"Sentence 1: \", S1_test_words[0])\nprint(\"Sentence 2: \", S2_test_words[0], \"\\n\")\nprint(\"is_duplicate =\", y_test[0], \"\\n\")","_____no_output_____"]],[["The first step is to tokenize the sentences using a custom tokenizer defined below.","_____no_output_____"]],[["# Create arrays\nS1_train = np.empty_like(S1_train_words)\nS2_train = np.empty_like(S2_train_words)\n\nS1_test = np.empty_like(S1_test_words)\nS2_test = np.empty_like(S2_test_words)","_____no_output_____"],["def data_tokenizer(sentence):\n \"\"\"Tokenizer function - cleans and tokenizes the data\n\n Args:\n sentence (str): The input sentence.\n Returns:\n list: The transformed input sentence.\n \"\"\"\n \n if sentence == \"\":\n return \"\"\n\n sentence = tc.lower_all(sentence)[0]\n\n # Change tabs to spaces\n sentence = re.sub(r\"\\t+_+\", \" \", sentence)\n # Change short forms\n sentence = re.sub(r\"\\'ve\", \" have\", sentence)\n sentence = re.sub(r\"(can\\'t|can not)\", \"cannot\", sentence)\n sentence = re.sub(r\"n\\'t\", \" not\", sentence)\n sentence = re.sub(r\"I\\'m\", \"I am\", sentence)\n sentence = re.sub(r\" m \", \" am \", sentence)\n sentence = re.sub(r\"(\\'re| r )\", \" are \", sentence)\n sentence = re.sub(r\"\\'d\", \" would \", sentence)\n sentence = re.sub(r\"\\'ll\", \" will \", sentence)\n sentence = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", sentence)\n # Make word separations\n sentence = re.sub(r\"(\\+|-|\\*|\\/|\\^|\\.)\", \" $1 \", sentence)\n # Remove irrelevant stuff, nonprintable characters and spaces\n sentence = re.sub(r\"(\\'s|\\'S|\\'|\\\"|,|[^ -~]+)\", \"\", sentence)\n sentence = tc.strip_all(sentence)[0]\n\n if sentence == \"\":\n return \"\"\n\n return tc.token_it(tc.lemming(sentence))[0]","_____no_output_____"],["for idx in range(len(S1_train_words)):\n S1_train[idx] = data_tokenizer(S1_train_words[idx])\n\nfor idx in range(len(S2_train_words)):\n S2_train[idx] = data_tokenizer(S2_train_words[idx])\n \nfor idx in range(len(S1_test_words)): \n S1_test[idx] = data_tokenizer(S1_test_words[idx])\n\nfor idx in range(len(S2_test_words)): \n S2_test[idx] = data_tokenizer(S2_test_words[idx])","_____no_output_____"]],[["\n### 1.2 Converting a sentence to a tensor\n\nThe next step is to convert every sentence to a tensor, or an array of numbers, using the word embeddings loaded above.","_____no_output_____"]],[["stop_words = set(stopwords.words('english'))\n\n# Converting sentences to vectors. OOV words or stopwords will be discarded.\nS1_train_vec = np.empty_like(S1_train)\nfor i in range(len(S1_train)):\n S1_train_vec[i] = np.zeros((300,))\n for word in S1_train[i]:\n if word not in stop_words and word in vec_model.key_to_index:\n S1_train_vec[i] += vec_model[word]\n S1_train[i] = S1_train_vec[i] / len(S1_train[i])\n\nS2_train_vec = np.empty_like(S2_train)\nfor i in range(len(S2_train)):\n S2_train_vec[i] = np.zeros((300,))\n for word in S2_train[i]:\n if word not in stop_words and word in vec_model.key_to_index:\n S2_train_vec[i] += vec_model[word]\n S2_train[i] = S2_train_vec[i] / len(S2_train[i])\n\n\nS1_test_vec = np.empty_like(S1_test)\nfor i in range(len(S1_test)):\n S1_test_vec[i] = np.zeros((300,))\n for word in S1_test[i]:\n if word not in stop_words and word in vec_model.key_to_index:\n S1_test_vec[i] += vec_model[word]\n S1_test[i] = S1_test_vec[i] / len(S1_test[i])\n\nS2_test_vec = np.empty_like(S2_test)\nfor i in range(len(S2_test)):\n S2_test_vec[i] = np.zeros((300,))\n for word in S2_test[i]:\n if word not in stop_words and word in vec_model.key_to_index:\n S2_test_vec[i] += vec_model[word]\n S2_test[i] = S2_test_vec[i] / len(S2_test[i])","_____no_output_____"],["print(\"FIRST SENTENCE IN TRAIN SET:\\n\")\nprint(S1_train_words[0], \"\\n\") \nprint(\"ENCODED VERSION:\")\nprint(S1_train[0],\"\\n\")\ndel(S1_train_words)\ndel(S2_train_words)\n\nprint(\"FIRST SENTENCE IN TEST SET:\\n\")\nprint(S1_test_words[0], \"\\n\")\nprint(\"ENCODED VERSION:\")\nprint(S1_test[0])\ndel(S1_test_words)\ndel(S2_test_words)","_____no_output_____"]],[["Now, the train set must be split into a training/validation set so that it can be used to train and evaluate the Siamese model.","_____no_output_____"]],[["# Splitting the data\ncut_off = int(len(S1_train) * .8)\ntrain_S1, train_S2 = S1_train[: cut_off], S2_train[: cut_off]\nval_S1, val_S2 = S1_train[cut_off :], S2_train[cut_off :]\nprint(\"Number of duplicate sentences: \", len(S1_train))\nprint(\"The length of the training set is: \", len(train_S1))\nprint(\"The length of the validation set is: \", len(val_S1))","_____no_output_____"]],[["\n### 1.3 Understanding and building the iterator \n\nGiven the compational limits, we need to split our data into batches. In this notebook, I built a data generator that takes in $S1$ and $S2$ and returned a batch of size `batch_size` in the following format $([s1_1, s1_2, s1_3, ...]$, $[s2_1, s2_2,s2_3, ...])$. The tuple consists of two arrays and each array has `batch_size` sentences. Again, $s1_i$ and $s2_i$ are duplicates, but they are not duplicates with any other elements in the batch. \n\nThe command `next(data_generator)` returns the next batch. This iterator returns a pair of arrays of sentences, which will later be used in the model.\n\n**The ideas behind:** \n- The generator returns shuffled batches of data. To achieve this without modifying the actual sentence lists, a list containing the indexes of the sentences is created. This list can be shuffled and used to get random batches everytime the index is reset.\n- Append elements of $S1$ and $S2$ to `input1` and `input2` respectively.","_____no_output_____"]],[["def data_generator(S1, S2, batch_size, shuffle=False):\n \"\"\"Generator function that yields batches of data\n\n Args:\n S1 (list): List of transformed (to tensor) sentences.\n S2 (list): List of transformed (to tensor) sentences.\n batch_size (int): Number of elements per batch.\n shuffle (bool, optional): If the batches should be randomnized or not. Defaults to False.\n Yields:\n tuple: Of the form (input1, input2) with types (numpy.ndarray, numpy.ndarray)\n NOTE: input1: inputs to your model [s1a, s2a, s3a, ...] i.e. (s1a,s1b) are duplicates\n input2: targets to your model [s1b, s2b,s3b, ...] i.e. (s1a,s2i) i!=a are not duplicates\n \"\"\"\n\n input1 = []\n input2 = []\n idx = 0\n len_s = len(S1)\n sentence_indexes = [*range(len_s)]\n\n if shuffle:\n rnd.shuffle(sentence_indexes)\n\n while True:\n if idx >= len_s:\n # If idx is greater than or equal to len_q, reset it\n idx = 0\n # Shuffle to get random batches if shuffle is set to True\n if shuffle:\n rnd.shuffle(sentence_indexes)\n\n s1 = S1[sentence_indexes[idx]]\n s2 = S2[sentence_indexes[idx]]\n\n idx += 1\n\n input1.append(s1)\n input2.append(s2)\n\n if len(input1) == batch_size:\n b1 = []\n b2 = []\n for s1, s2 in zip(input1, input2):\n # Append s1\n b1.append(s1)\n # Append s2\n b2.append(s2)\n\n # Use b1 and b2\n yield np.array(b1).reshape((batch_size, 1, -1)), np.array(b2).reshape((batch_size, 1, -1))\n\n # reset the batches\n input1, input2 = [], []","_____no_output_____"],["batch_size = 2\nres1, res2 = next(data_generator(train_S1, train_S2, batch_size))\nprint(\"First sentences :\\n\", res1, \"\\n Shape: \", res1.shape)\nprint(\"Second sentences :\\n\", res2, \"\\n Shape: \", res2.shape)","_____no_output_____"]],[["Now we can go ahead and start building the neural network, as we have a data generator.","_____no_output_____"],["\n# Part 2: Defining the Siamese model\n\n\n\n### 2.1 Understanding and building the Siamese Network \n\nA Siamese network is a neural network which uses the same weights while working in tandem on two different input vectors to compute comparable output vectors. The Siamese network model proposed in this notebook looks like this:\n\n\n\nThe sentences' embeddings are passed to an LSTM layer, the output vectors, $v_1$ and $v_2$, are normalized, and finally a triplet loss is used to get the corresponding cosine similarity for each pair of sentences. The triplet loss makes use of a baseline (anchor) input that is compared to a positive (truthy) input and a negative (falsy) input. The distance from the baseline (anchor) input to the positive (truthy) input is minimized, and the distance from the baseline (anchor) input to the negative (falsy) input is maximized. In math equations, the following is maximized:\n\n$$\\mathcal{L}(A, P, N)=\\max \\left(\\|\\mathrm{f}(A)-\\mathrm{f}(P)\\|^{2}-\\|\\mathrm{f}(A)-\\mathrm{f}(N)\\|^{2}+\\alpha, 0\\right)$$\n\n$A$ is the anchor input, for example $s1_1$, $P$ the duplicate input, for example, $s2_1$, and $N$ the negative input (the non duplicate sentence), for example $s2_2$.
\n$\\alpha$ is a margin - how much the duplicates are pushed from the non duplicates. \n
\n\n**The ideas behind:**\n- Trax library is used in implementing the model.\n- `tl.Serial`: Combinator that applies layers serially (by function composition) allowing the set up the overall structure of the feedforward.\n- `tl.LSTM` The LSTM layer. \n- `tl.Mean`: Computes the mean across a desired axis. Mean uses one tensor axis to form groups of values and replaces each group with the mean value of that group.\n- `tl.Fn` Layer with no weights that applies the function f - vector normalization in this case.\n- `tl.parallel`: It is a combinator layer (like `Serial`) that applies a list of layers in parallel to its inputs.\n","_____no_output_____"]],[["def Siamese(d_model=300):\n \"\"\"Returns a Siamese model.\n\n Args:\n d_model (int, optional): Depth of the model. Defaults to 128.\n mode (str, optional): \"train\", \"eval\" or \"predict\", predict mode is for fast inference. Defaults to \"train\".\n\n Returns:\n trax.layers.combinators.Parallel: A Siamese model. \n \"\"\"\n\n def normalize(x): # normalizes the vectors to have L2 norm 1\n return x / fastnp.sqrt(fastnp.sum(x * x, axis=-1, keepdims=True))\n\n s_processor = tl.Serial( # Processor will run on S1 and S2.\n tl.LSTM(d_model), # LSTM layer\n tl.Mean(axis=1), # Mean over columns\n tl.Fn('Normalize', lambda x: normalize(x)) # Apply normalize function\n ) # Returns one vector of shape [batch_size, d_model].\n \n # Run on S1 and S2 in parallel.\n model = tl.Parallel(s_processor, s_processor)\n return model\n","_____no_output_____"]],[["Setup the Siamese network model.","_____no_output_____"]],[["# Check the model\nmodel = Siamese(d_model=300)\nprint(model)","_____no_output_____"]],[["\n\n### 2.2 Implementing Hard Negative Mining\n\n\nNow it's the time to implement the `TripletLoss`.\nAs explained earlier, loss is composed of two terms. One term utilizes the mean of all the non duplicates, the second utilizes the *closest negative*. The loss expression is then:\n \n\\begin{align}\n \\mathcal{Loss_1(A,P,N)} &=\\max \\left( -cos(A,P) + mean_{neg} +\\alpha, 0\\right) \\\\\n \\mathcal{Loss_2(A,P,N)} &=\\max \\left( -cos(A,P) + closest_{neg} +\\alpha, 0\\right) \\\\\n\\mathcal{Loss(A,P,N)} &= mean(Loss_1 + Loss_2) \\\\\n\\end{align}","_____no_output_____"]],[["def TripletLossFn(v1, v2, margin=0.25):\n \"\"\"Custom Loss function.\n\n Args:\n v1 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to S1.\n v2 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to S2.\n margin (float, optional): Desired margin. Defaults to 0.25.\n\n Returns:\n jax.interpreters.xla.DeviceArray: Triplet Loss.\n \"\"\"\n\n scores = fastnp.dot(v1, v2.T) # pairwise cosine sim\n batch_size = len(scores)\n\n positive = fastnp.diagonal(scores) # the positive ones (duplicates)\n negative_without_positive = scores - 2.0 * fastnp.eye(batch_size)\n\n closest_negative = fastnp.max(negative_without_positive, axis=1)\n negative_zero_on_duplicate = (1.0 - fastnp.eye(batch_size)) * scores\n mean_negative = fastnp.sum(negative_zero_on_duplicate, axis=1) / (batch_size - 1)\n\n triplet_loss1 = fastnp.maximum(0.0, margin - positive + closest_negative)\n triplet_loss2 = fastnp.maximum(0.0, margin - positive + mean_negative)\n triplet_loss = fastnp.mean(triplet_loss1 + triplet_loss2)\n \n return triplet_loss","_____no_output_____"],["v1 = np.array([[0.26726124, 0.53452248, 0.80178373],[0.5178918 , 0.57543534, 0.63297887]])\nv2 = np.array([[0.26726124, 0.53452248, 0.80178373],[-0.5178918 , -0.57543534, -0.63297887]])\nTripletLossFn(v2,v1)\nprint(\"Triplet Loss:\", TripletLossFn(v2,v1))","_____no_output_____"]],[["**Expected Output:**\n```CPP\nTriplet Loss: 1.0\n``` ","_____no_output_____"]],[["from functools import partial\ndef TripletLoss(margin=1):\n # Trax layer creation\n triplet_loss_fn = partial(TripletLossFn, margin=margin)\n return tl.Fn(\"TripletLoss\", triplet_loss_fn)","_____no_output_____"]],[["\n\n# Part 3: Training\n\nThe next step is model training - defining the cost function and the optimizer, feeding in the built model. But first I will define the data generators used in the model.","_____no_output_____"]],[["batch_size = 512\ntrain_generator = data_generator(train_S1, train_S2, batch_size)\nval_generator = data_generator(val_S1, val_S2, batch_size)\nprint(\"train_S1.shape \", train_S1.shape)\nprint(\"val_S1.shape \", val_S1.shape)","_____no_output_____"]],[["Now, I will define the training step. Each training iteration is defined as an `epoch`, each epoch being an iteration over all the data, using the training iterator.\n\n**The ideas behind:**\n- Two tasks are needed: `TrainTask` and `EvalTask`.\n- The training runs in a trax loop `trax.supervised.training.Loop`.\n- Pass the other parameters to a loop.","_____no_output_____"]],[["def train_model(Siamese, TripletLoss, lr_schedule, train_generator=train_generator, val_generator=val_generator, output_dir=\"trax_model/\"):\n \"\"\"Training the Siamese Model\n\n Args:\n Siamese (function): Function that returns the Siamese model.\n TripletLoss (function): Function that defines the TripletLoss loss function.\n lr_schedule (function): Trax multifactor schedule function.\n train_generator (generator, optional): Training generator. Defaults to train_generator.\n val_generator (generator, optional): Validation generator. Defaults to val_generator.\n output_dir (str, optional): Path to save model to. Defaults to \"trax_model/\".\n\n Returns:\n trax.supervised.training.Loop: Training loop for the model.\n \"\"\"\n\n output_dir = os.path.expanduser(output_dir)\n\n train_task = training.TrainTask(\n labeled_data=train_generator,\n loss_layer=TripletLoss(),\n optimizer=trax.optimizers.Adam(0.01),\n lr_schedule=lr_schedule\n )\n\n eval_task = training.EvalTask(\n labeled_data=val_generator,\n metrics=[TripletLoss()]\n )\n\n training_loop = training.Loop(Siamese(),\n train_task,\n eval_tasks=[eval_task],\n output_dir=output_dir,\n random_seed=34)\n\n return training_loop","_____no_output_____"],["train_steps = 1500\nlr_schedule = trax.lr.warmup_and_rsqrt_decay(400, 0.01)\ntraining_loop = train_model(Siamese, TripletLoss, lr_schedule)\ntraining_loop.run(train_steps)","_____no_output_____"]],[["\n\n# Part 4: Evaluation","_____no_output_____"],["To determine the accuracy of the model, the test set that was configured earlier is used. While the training used only positive examples, the test data, S1_test, S2_test and y_test, is setup as pairs of sentences, some of which are duplicates some are not. \nThis routine runs all the test sentences pairs through the model, computes the cosine simlarity of each pair, thresholds it and compares the result to y_test - the correct response from the data set. The results are accumulated to produce an accuracy.\n\n**The ideas behind:** \n - The model loops through the incoming data in batch_size chunks.\n - The output vectors are computed and their cosine similarity is thresholded.","_____no_output_____"]],[["def classify(test_S1, test_S2, y, threshold, model, data_generator=data_generator, batch_size=64):\n \"\"\"Function to test the model. Calculates some metrics, such as precision, accuracy, recall and F1 score.\n\n Args:\n test_S1 (numpy.ndarray): Array of S1 sentences.\n test_S2 (numpy.ndarray): Array of S2 sentences.\n y (numpy.ndarray): Array of actual target.\n threshold (float): Desired threshold.\n model (trax.layers.combinators.Parallel): The Siamese model.\n data_generator (function): Data generator function. Defaults to data_generator.\n batch_size (int, optional): Size of the batches. Defaults to 64.\n\n Returns:\n (float, float, float, float): Accuracy, precision, recall and F1 score of the model.\n \"\"\"\n\n true_pos = 0\n true_neg = 0\n false_pos = 0\n false_neg = 0\n\n for i in range(0, len(test_S1), batch_size):\n to_process = len(test_S1) - i\n\n if to_process < batch_size:\n batch_size = to_process\n\n s1, s2 = next(data_generator(test_S1[i : i + batch_size], test_S2[i : i + batch_size], batch_size, shuffle=False))\n y_test = y[i : i + batch_size]\n\n v1, v2 = model((s1, s2))\n\n for j in range(batch_size):\n d = np.dot(v1[j], v2[j].T)\n res = d > threshold\n\n if res == 1:\n if y_test[j] == res:\n true_pos += 1\n else:\n false_pos += 1\n else:\n if y_test[j] == res:\n true_neg += 1\n else:\n false_neg += 1\n\n accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)\n precision = true_pos / (true_pos + false_pos)\n recall = true_pos / (true_pos + false_neg)\n f1_score = 2 * precision * recall / (precision + recall)\n \n return (accuracy, precision, recall, f1_score)","_____no_output_____"],["print(len(S1_test))","_____no_output_____"],["# Loading in the saved model\nmodel = Siamese()\nmodel.init_from_file(\"trax_model/model.pkl.gz\")\n# Evaluating it\naccuracy, precision, recall, f1_score = classify(S1_test, S2_test, y_test, 0.7, model, batch_size=512) \nprint(\"Accuracy\", accuracy)\nprint(\"Precision\", precision)\nprint(\"Recall\", recall)\nprint(\"F1 score\", f1_score)","_____no_output_____"]],[["\n\n# Part 5: Making predictions\n\nIn this section the model will be put to work. It will be wrapped in a function called `predict` which takes two sentences as input and returns $1$ or $0$, depending on whether the pair is a duplicate or not. \n\nBut first, we need to embed the sentences.","_____no_output_____"]],[["def predict(sentence1, sentence2, threshold, model, data_generator=data_generator, verbose=False):\n \"\"\"Function for predicting if two sentences are duplicates.\n\n Args:\n sentence1 (str): First sentence.\n sentence2 (str): Second sentence.\n threshold (float): Desired threshold.\n model (trax.layers.combinators.Parallel): The Siamese model.\n data_generator (function): Data generator function. Defaults to data_generator.\n verbose (bool, optional): If the results should be printed out. Defaults to False.\n\n Returns:\n bool: True if the sentences are duplicates, False otherwise.\n \"\"\"\n\n s1 = data_tokenizer(sentence1) # tokenize\n S1 = np.zeros((300,))\n for word in s1:\n if word not in stop_words and word in vec_model.key_to_index:\n S1 += vec_model[word]\n S1 = S1 / len(s1)\n \n s2 = data_tokenizer(sentence2) # tokenize\n S2 = np.zeros((300,))\n for word in s2:\n if word not in stop_words and word in vec_model.key_to_index:\n S1 += vec_model[word]\n S2 = S2 / len(s2)\n\n S1, S2 = next(data_generator([S1], [S2], 1))\n\n v1, v2 = model((S1, S2))\n d = np.dot(v1[0], v2[0].T)\n res = d > threshold\n \n if verbose == True:\n print(\"S1 = \", S1, \"\\nS2 = \", S2)\n print(\"d = \", d)\n print(\"res = \", res)\n\n return res","_____no_output_____"]],[["Now we can test the model's ability to make predictions.","_____no_output_____"]],[["sentence1 = \"I love running in the park.\"\nsentence2 = \"I like running in park?\"\n# 1 means it is duplicated, 0 otherwise\npredict(sentence1 , sentence2, 0.7, model, verbose=True)","_____no_output_____"]],[["The Siamese network is capable of catching complicated structures. Concretely, it can identify sentence duplicates although the sentences do not have many words in common.","_____no_output_____"]]],"string":"[\n [\n [\n \"# [Math-Bot] Siamese LSTM: Detecting duplicates\\n\\n\\n\\nAuthor: Alin-Andrei Georgescu 2021\\n\\nWelcome to my notebook! It explores the Siamese networks applied to natural language processing. The model is intended to detect duplicates, in other words to check if two sentences are similar.\\nThe model uses \\\"Long short-term memory\\\" (LSTM) neural networks, which are an artificial recurrent neural networks (RNNs). This version uses GloVe pretrained vectors.\\n\\n## Outline\\n\\n- [Overview](#0)\\n- [Part 1: Importing the Data](#1)\\n - [1.1 Loading in the data](#1.1)\\n - [1.2 Converting a sentence to a tensor](#1.2)\\n - [1.3 Understanding and building the iterator](#1.3)\\n- [Part 2: Defining the Siamese model](#2)\\n - [2.1 Understanding and building the Siamese Network](#2.1)\\n - [2.2 Implementing Hard Negative Mining](#2.2)\\n- [Part 3: Training](#3)\\n- [Part 4: Evaluation](#4)\\n- [Part 5: Making predictions](#5)\\n\\n\\n### Overview\\n\\nGeneral ideas:\\n- Designing a Siamese networks model\\n- Implementing the triplet loss\\n- Evaluating accuracy\\n- Using cosine similarity between the model's outputted vectors\\n- Working with Trax and Numpy libraries in Python 3\\n\\nThe LSTM cell's architecture (source: https://www.researchgate.net/figure/The-structure-of-the-LSTM-unit_fig2_331421650):\\n\\n\\n\\n\\nI will start by preprocessing the data, then I will build a classifier that will identify whether two sentences are the same or not. \\n\\n\\nI tokenized the data, then split the dataset into training and testing sets. I loaded pretrained GloVe word embeddings and built a sentence's vector by averaging the composing word's vectors. The model takes in the two sentence embeddings, runs them through an LSTM, and then compares the outputs of the two sub networks using cosine similarity.\\n\\nThis notebook has been built based on Coursera's Natural Language Processing Specialization.\\n\",\n \"_____no_output_____\"\n ],\n [\n \"\\n# Part 1: Importing the Data\\n\\n### 1.1 Loading in the data\\n\\nFirst step in building a model is building a dataset. I used three datasets in building my model:\\n- the Quora Question Pairs\\n- edited SICK dataset\\n- custom Maths duplicates dataset\\n\\nRun the cell below to import some of the needed packages. \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\\nimport re\\nimport nltk\\nfrom nltk.corpus import stopwords\\nnltk.download('punkt')\\nnltk.download('stopwords')\\nnltk.download('wordnet')\\n\\nimport numpy as np\\nimport pandas as pd\\nimport random as rnd\\n\\n!pip install textcleaner\\nimport textcleaner as tc\\n\\n!pip install trax\\nimport trax\\nfrom trax import layers as tl\\nfrom trax.supervised import training\\nfrom trax.fastmath import numpy as fastnp\\n\\n!pip install gensim\\nfrom gensim.models import KeyedVectors\\nfrom gensim.scripts.glove2word2vec import glove2word2vec\\n\\nfrom collections import defaultdict\\n\\n# set random seeds\\nrnd.seed(34)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Notice that in this notebook Trax's numpy is referred to as `fastnp`, while regular numpy is referred to as `np`.**\\n\\nNow the dataset and word embeddings will get loaded and the data processed.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"data = pd.read_csv(\\\"data/merged_dataset.csv\\\", encoding=\\\"utf-8\\\")\\n\\nN = len(data)\\nprint(\\\"Number of sentence pairs: \\\", N)\\n\\ndata.head()\\n\\n!wget -O data/glove.840B.300d.zip nlp.stanford.edu/data/glove.840B.300d.zip\\n!unzip -d data data/glove.840B.300d.zip\\n!rm data/glove.840B.300d.zip\\nvec_model = KeyedVectors.load_word2vec_format(\\\"data/glove.840B.300d.txt\\\", binary=False, no_header=True) \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Then I split the data into a train and test set. The test set will be used later to evaluate the model.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"N_dups = len(data[data.is_duplicate == 1])\\n\\n# Take 90% of the duplicates for the train set\\nN_train = int(N_dups * 0.9)\\nprint(N_train)\\n\\n# Take the rest of the duplicates for the test set + an equal number of non-dups\\nN_test = (N_dups - N_train) * 2\\nprint(N_test)\\n\\ndata_train = data[: N_train]\\n# Shuffle the train set\\ndata_train = data_train.sample(frac=1)\\n\\ndata_test = data[N_train : N_train + N_test]\\n# Shuffle the test set\\ndata_test = data_test.sample(frac=1)\\n\\nprint(\\\"Train set: \\\", len(data_train), \\\"; Test set: \\\", len(data_test))\\n\\n# Remove the unneeded data to some memory\\ndel(data)\",\n \"_____no_output_____\"\n ],\n [\n \"S1_train_words = np.array(data_train[\\\"sentence1\\\"])\\nS2_train_words = np.array(data_train[\\\"sentence2\\\"])\\n\\nS1_test_words = np.array(data_test[\\\"sentence1\\\"])\\nS2_test_words = np.array(data_test[\\\"sentence2\\\"])\\ny_test = np.array(data_test[\\\"is_duplicate\\\"])\\n\\ndel(data_train)\\ndel(data_test)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Above, you have seen that the model only takes the duplicated sentences for training.\\nAll this has a purpose, as the data generator will produce batches $([s1_1, s1_2, s1_3, ...]$, $[s2_1, s2_2,s2_3, ...])$, where $s1_i$ and $s2_k$ are duplicate if and only if $i = k$.\\n\\nAn example of how the data looks is shown below.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"print(\\\"TRAINING SENTENCES:\\\\n\\\")\\nprint(\\\"Sentence 1: \\\", S1_train_words[0])\\nprint(\\\"Sentence 2: \\\", S2_train_words[0], \\\"\\\\n\\\")\\nprint(\\\"Sentence 1: \\\", S1_train_words[5])\\nprint(\\\"Sentence 2: \\\", S2_train_words[5], \\\"\\\\n\\\")\\n\\nprint(\\\"TESTING SENTENCES:\\\\n\\\")\\nprint(\\\"Sentence 1: \\\", S1_test_words[0])\\nprint(\\\"Sentence 2: \\\", S2_test_words[0], \\\"\\\\n\\\")\\nprint(\\\"is_duplicate =\\\", y_test[0], \\\"\\\\n\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The first step is to tokenize the sentences using a custom tokenizer defined below.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create arrays\\nS1_train = np.empty_like(S1_train_words)\\nS2_train = np.empty_like(S2_train_words)\\n\\nS1_test = np.empty_like(S1_test_words)\\nS2_test = np.empty_like(S2_test_words)\",\n \"_____no_output_____\"\n ],\n [\n \"def data_tokenizer(sentence):\\n \\\"\\\"\\\"Tokenizer function - cleans and tokenizes the data\\n\\n Args:\\n sentence (str): The input sentence.\\n Returns:\\n list: The transformed input sentence.\\n \\\"\\\"\\\"\\n \\n if sentence == \\\"\\\":\\n return \\\"\\\"\\n\\n sentence = tc.lower_all(sentence)[0]\\n\\n # Change tabs to spaces\\n sentence = re.sub(r\\\"\\\\t+_+\\\", \\\" \\\", sentence)\\n # Change short forms\\n sentence = re.sub(r\\\"\\\\'ve\\\", \\\" have\\\", sentence)\\n sentence = re.sub(r\\\"(can\\\\'t|can not)\\\", \\\"cannot\\\", sentence)\\n sentence = re.sub(r\\\"n\\\\'t\\\", \\\" not\\\", sentence)\\n sentence = re.sub(r\\\"I\\\\'m\\\", \\\"I am\\\", sentence)\\n sentence = re.sub(r\\\" m \\\", \\\" am \\\", sentence)\\n sentence = re.sub(r\\\"(\\\\'re| r )\\\", \\\" are \\\", sentence)\\n sentence = re.sub(r\\\"\\\\'d\\\", \\\" would \\\", sentence)\\n sentence = re.sub(r\\\"\\\\'ll\\\", \\\" will \\\", sentence)\\n sentence = re.sub(r\\\"(\\\\d+)(k)\\\", r\\\"\\\\g<1>000\\\", sentence)\\n # Make word separations\\n sentence = re.sub(r\\\"(\\\\+|-|\\\\*|\\\\/|\\\\^|\\\\.)\\\", \\\" $1 \\\", sentence)\\n # Remove irrelevant stuff, nonprintable characters and spaces\\n sentence = re.sub(r\\\"(\\\\'s|\\\\'S|\\\\'|\\\\\\\"|,|[^ -~]+)\\\", \\\"\\\", sentence)\\n sentence = tc.strip_all(sentence)[0]\\n\\n if sentence == \\\"\\\":\\n return \\\"\\\"\\n\\n return tc.token_it(tc.lemming(sentence))[0]\",\n \"_____no_output_____\"\n ],\n [\n \"for idx in range(len(S1_train_words)):\\n S1_train[idx] = data_tokenizer(S1_train_words[idx])\\n\\nfor idx in range(len(S2_train_words)):\\n S2_train[idx] = data_tokenizer(S2_train_words[idx])\\n \\nfor idx in range(len(S1_test_words)): \\n S1_test[idx] = data_tokenizer(S1_test_words[idx])\\n\\nfor idx in range(len(S2_test_words)): \\n S2_test[idx] = data_tokenizer(S2_test_words[idx])\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n### 1.2 Converting a sentence to a tensor\\n\\nThe next step is to convert every sentence to a tensor, or an array of numbers, using the word embeddings loaded above.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"stop_words = set(stopwords.words('english'))\\n\\n# Converting sentences to vectors. OOV words or stopwords will be discarded.\\nS1_train_vec = np.empty_like(S1_train)\\nfor i in range(len(S1_train)):\\n S1_train_vec[i] = np.zeros((300,))\\n for word in S1_train[i]:\\n if word not in stop_words and word in vec_model.key_to_index:\\n S1_train_vec[i] += vec_model[word]\\n S1_train[i] = S1_train_vec[i] / len(S1_train[i])\\n\\nS2_train_vec = np.empty_like(S2_train)\\nfor i in range(len(S2_train)):\\n S2_train_vec[i] = np.zeros((300,))\\n for word in S2_train[i]:\\n if word not in stop_words and word in vec_model.key_to_index:\\n S2_train_vec[i] += vec_model[word]\\n S2_train[i] = S2_train_vec[i] / len(S2_train[i])\\n\\n\\nS1_test_vec = np.empty_like(S1_test)\\nfor i in range(len(S1_test)):\\n S1_test_vec[i] = np.zeros((300,))\\n for word in S1_test[i]:\\n if word not in stop_words and word in vec_model.key_to_index:\\n S1_test_vec[i] += vec_model[word]\\n S1_test[i] = S1_test_vec[i] / len(S1_test[i])\\n\\nS2_test_vec = np.empty_like(S2_test)\\nfor i in range(len(S2_test)):\\n S2_test_vec[i] = np.zeros((300,))\\n for word in S2_test[i]:\\n if word not in stop_words and word in vec_model.key_to_index:\\n S2_test_vec[i] += vec_model[word]\\n S2_test[i] = S2_test_vec[i] / len(S2_test[i])\",\n \"_____no_output_____\"\n ],\n [\n \"print(\\\"FIRST SENTENCE IN TRAIN SET:\\\\n\\\")\\nprint(S1_train_words[0], \\\"\\\\n\\\") \\nprint(\\\"ENCODED VERSION:\\\")\\nprint(S1_train[0],\\\"\\\\n\\\")\\ndel(S1_train_words)\\ndel(S2_train_words)\\n\\nprint(\\\"FIRST SENTENCE IN TEST SET:\\\\n\\\")\\nprint(S1_test_words[0], \\\"\\\\n\\\")\\nprint(\\\"ENCODED VERSION:\\\")\\nprint(S1_test[0])\\ndel(S1_test_words)\\ndel(S2_test_words)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now, the train set must be split into a training/validation set so that it can be used to train and evaluate the Siamese model.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Splitting the data\\ncut_off = int(len(S1_train) * .8)\\ntrain_S1, train_S2 = S1_train[: cut_off], S2_train[: cut_off]\\nval_S1, val_S2 = S1_train[cut_off :], S2_train[cut_off :]\\nprint(\\\"Number of duplicate sentences: \\\", len(S1_train))\\nprint(\\\"The length of the training set is: \\\", len(train_S1))\\nprint(\\\"The length of the validation set is: \\\", len(val_S1))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n### 1.3 Understanding and building the iterator \\n\\nGiven the compational limits, we need to split our data into batches. In this notebook, I built a data generator that takes in $S1$ and $S2$ and returned a batch of size `batch_size` in the following format $([s1_1, s1_2, s1_3, ...]$, $[s2_1, s2_2,s2_3, ...])$. The tuple consists of two arrays and each array has `batch_size` sentences. Again, $s1_i$ and $s2_i$ are duplicates, but they are not duplicates with any other elements in the batch. \\n\\nThe command `next(data_generator)` returns the next batch. This iterator returns a pair of arrays of sentences, which will later be used in the model.\\n\\n**The ideas behind:** \\n- The generator returns shuffled batches of data. To achieve this without modifying the actual sentence lists, a list containing the indexes of the sentences is created. This list can be shuffled and used to get random batches everytime the index is reset.\\n- Append elements of $S1$ and $S2$ to `input1` and `input2` respectively.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def data_generator(S1, S2, batch_size, shuffle=False):\\n \\\"\\\"\\\"Generator function that yields batches of data\\n\\n Args:\\n S1 (list): List of transformed (to tensor) sentences.\\n S2 (list): List of transformed (to tensor) sentences.\\n batch_size (int): Number of elements per batch.\\n shuffle (bool, optional): If the batches should be randomnized or not. Defaults to False.\\n Yields:\\n tuple: Of the form (input1, input2) with types (numpy.ndarray, numpy.ndarray)\\n NOTE: input1: inputs to your model [s1a, s2a, s3a, ...] i.e. (s1a,s1b) are duplicates\\n input2: targets to your model [s1b, s2b,s3b, ...] i.e. (s1a,s2i) i!=a are not duplicates\\n \\\"\\\"\\\"\\n\\n input1 = []\\n input2 = []\\n idx = 0\\n len_s = len(S1)\\n sentence_indexes = [*range(len_s)]\\n\\n if shuffle:\\n rnd.shuffle(sentence_indexes)\\n\\n while True:\\n if idx >= len_s:\\n # If idx is greater than or equal to len_q, reset it\\n idx = 0\\n # Shuffle to get random batches if shuffle is set to True\\n if shuffle:\\n rnd.shuffle(sentence_indexes)\\n\\n s1 = S1[sentence_indexes[idx]]\\n s2 = S2[sentence_indexes[idx]]\\n\\n idx += 1\\n\\n input1.append(s1)\\n input2.append(s2)\\n\\n if len(input1) == batch_size:\\n b1 = []\\n b2 = []\\n for s1, s2 in zip(input1, input2):\\n # Append s1\\n b1.append(s1)\\n # Append s2\\n b2.append(s2)\\n\\n # Use b1 and b2\\n yield np.array(b1).reshape((batch_size, 1, -1)), np.array(b2).reshape((batch_size, 1, -1))\\n\\n # reset the batches\\n input1, input2 = [], []\",\n \"_____no_output_____\"\n ],\n [\n \"batch_size = 2\\nres1, res2 = next(data_generator(train_S1, train_S2, batch_size))\\nprint(\\\"First sentences :\\\\n\\\", res1, \\\"\\\\n Shape: \\\", res1.shape)\\nprint(\\\"Second sentences :\\\\n\\\", res2, \\\"\\\\n Shape: \\\", res2.shape)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now we can go ahead and start building the neural network, as we have a data generator.\",\n \"_____no_output_____\"\n ],\n [\n \"\\n# Part 2: Defining the Siamese model\\n\\n\\n\\n### 2.1 Understanding and building the Siamese Network \\n\\nA Siamese network is a neural network which uses the same weights while working in tandem on two different input vectors to compute comparable output vectors. The Siamese network model proposed in this notebook looks like this:\\n\\n\\n\\nThe sentences' embeddings are passed to an LSTM layer, the output vectors, $v_1$ and $v_2$, are normalized, and finally a triplet loss is used to get the corresponding cosine similarity for each pair of sentences. The triplet loss makes use of a baseline (anchor) input that is compared to a positive (truthy) input and a negative (falsy) input. The distance from the baseline (anchor) input to the positive (truthy) input is minimized, and the distance from the baseline (anchor) input to the negative (falsy) input is maximized. In math equations, the following is maximized:\\n\\n$$\\\\mathcal{L}(A, P, N)=\\\\max \\\\left(\\\\|\\\\mathrm{f}(A)-\\\\mathrm{f}(P)\\\\|^{2}-\\\\|\\\\mathrm{f}(A)-\\\\mathrm{f}(N)\\\\|^{2}+\\\\alpha, 0\\\\right)$$\\n\\n$A$ is the anchor input, for example $s1_1$, $P$ the duplicate input, for example, $s2_1$, and $N$ the negative input (the non duplicate sentence), for example $s2_2$.
\\n$\\\\alpha$ is a margin - how much the duplicates are pushed from the non duplicates. \\n
\\n\\n**The ideas behind:**\\n- Trax library is used in implementing the model.\\n- `tl.Serial`: Combinator that applies layers serially (by function composition) allowing the set up the overall structure of the feedforward.\\n- `tl.LSTM` The LSTM layer. \\n- `tl.Mean`: Computes the mean across a desired axis. Mean uses one tensor axis to form groups of values and replaces each group with the mean value of that group.\\n- `tl.Fn` Layer with no weights that applies the function f - vector normalization in this case.\\n- `tl.parallel`: It is a combinator layer (like `Serial`) that applies a list of layers in parallel to its inputs.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def Siamese(d_model=300):\\n \\\"\\\"\\\"Returns a Siamese model.\\n\\n Args:\\n d_model (int, optional): Depth of the model. Defaults to 128.\\n mode (str, optional): \\\"train\\\", \\\"eval\\\" or \\\"predict\\\", predict mode is for fast inference. Defaults to \\\"train\\\".\\n\\n Returns:\\n trax.layers.combinators.Parallel: A Siamese model. \\n \\\"\\\"\\\"\\n\\n def normalize(x): # normalizes the vectors to have L2 norm 1\\n return x / fastnp.sqrt(fastnp.sum(x * x, axis=-1, keepdims=True))\\n\\n s_processor = tl.Serial( # Processor will run on S1 and S2.\\n tl.LSTM(d_model), # LSTM layer\\n tl.Mean(axis=1), # Mean over columns\\n tl.Fn('Normalize', lambda x: normalize(x)) # Apply normalize function\\n ) # Returns one vector of shape [batch_size, d_model].\\n \\n # Run on S1 and S2 in parallel.\\n model = tl.Parallel(s_processor, s_processor)\\n return model\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Setup the Siamese network model.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Check the model\\nmodel = Siamese(d_model=300)\\nprint(model)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n\\n### 2.2 Implementing Hard Negative Mining\\n\\n\\nNow it's the time to implement the `TripletLoss`.\\nAs explained earlier, loss is composed of two terms. One term utilizes the mean of all the non duplicates, the second utilizes the *closest negative*. The loss expression is then:\\n \\n\\\\begin{align}\\n \\\\mathcal{Loss_1(A,P,N)} &=\\\\max \\\\left( -cos(A,P) + mean_{neg} +\\\\alpha, 0\\\\right) \\\\\\\\\\n \\\\mathcal{Loss_2(A,P,N)} &=\\\\max \\\\left( -cos(A,P) + closest_{neg} +\\\\alpha, 0\\\\right) \\\\\\\\\\n\\\\mathcal{Loss(A,P,N)} &= mean(Loss_1 + Loss_2) \\\\\\\\\\n\\\\end{align}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def TripletLossFn(v1, v2, margin=0.25):\\n \\\"\\\"\\\"Custom Loss function.\\n\\n Args:\\n v1 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to S1.\\n v2 (numpy.ndarray): Array with dimension (batch_size, model_dimension) associated to S2.\\n margin (float, optional): Desired margin. Defaults to 0.25.\\n\\n Returns:\\n jax.interpreters.xla.DeviceArray: Triplet Loss.\\n \\\"\\\"\\\"\\n\\n scores = fastnp.dot(v1, v2.T) # pairwise cosine sim\\n batch_size = len(scores)\\n\\n positive = fastnp.diagonal(scores) # the positive ones (duplicates)\\n negative_without_positive = scores - 2.0 * fastnp.eye(batch_size)\\n\\n closest_negative = fastnp.max(negative_without_positive, axis=1)\\n negative_zero_on_duplicate = (1.0 - fastnp.eye(batch_size)) * scores\\n mean_negative = fastnp.sum(negative_zero_on_duplicate, axis=1) / (batch_size - 1)\\n\\n triplet_loss1 = fastnp.maximum(0.0, margin - positive + closest_negative)\\n triplet_loss2 = fastnp.maximum(0.0, margin - positive + mean_negative)\\n triplet_loss = fastnp.mean(triplet_loss1 + triplet_loss2)\\n \\n return triplet_loss\",\n \"_____no_output_____\"\n ],\n [\n \"v1 = np.array([[0.26726124, 0.53452248, 0.80178373],[0.5178918 , 0.57543534, 0.63297887]])\\nv2 = np.array([[0.26726124, 0.53452248, 0.80178373],[-0.5178918 , -0.57543534, -0.63297887]])\\nTripletLossFn(v2,v1)\\nprint(\\\"Triplet Loss:\\\", TripletLossFn(v2,v1))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**Expected Output:**\\n```CPP\\nTriplet Loss: 1.0\\n``` \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from functools import partial\\ndef TripletLoss(margin=1):\\n # Trax layer creation\\n triplet_loss_fn = partial(TripletLossFn, margin=margin)\\n return tl.Fn(\\\"TripletLoss\\\", triplet_loss_fn)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n\\n# Part 3: Training\\n\\nThe next step is model training - defining the cost function and the optimizer, feeding in the built model. But first I will define the data generators used in the model.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"batch_size = 512\\ntrain_generator = data_generator(train_S1, train_S2, batch_size)\\nval_generator = data_generator(val_S1, val_S2, batch_size)\\nprint(\\\"train_S1.shape \\\", train_S1.shape)\\nprint(\\\"val_S1.shape \\\", val_S1.shape)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now, I will define the training step. Each training iteration is defined as an `epoch`, each epoch being an iteration over all the data, using the training iterator.\\n\\n**The ideas behind:**\\n- Two tasks are needed: `TrainTask` and `EvalTask`.\\n- The training runs in a trax loop `trax.supervised.training.Loop`.\\n- Pass the other parameters to a loop.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def train_model(Siamese, TripletLoss, lr_schedule, train_generator=train_generator, val_generator=val_generator, output_dir=\\\"trax_model/\\\"):\\n \\\"\\\"\\\"Training the Siamese Model\\n\\n Args:\\n Siamese (function): Function that returns the Siamese model.\\n TripletLoss (function): Function that defines the TripletLoss loss function.\\n lr_schedule (function): Trax multifactor schedule function.\\n train_generator (generator, optional): Training generator. Defaults to train_generator.\\n val_generator (generator, optional): Validation generator. Defaults to val_generator.\\n output_dir (str, optional): Path to save model to. Defaults to \\\"trax_model/\\\".\\n\\n Returns:\\n trax.supervised.training.Loop: Training loop for the model.\\n \\\"\\\"\\\"\\n\\n output_dir = os.path.expanduser(output_dir)\\n\\n train_task = training.TrainTask(\\n labeled_data=train_generator,\\n loss_layer=TripletLoss(),\\n optimizer=trax.optimizers.Adam(0.01),\\n lr_schedule=lr_schedule\\n )\\n\\n eval_task = training.EvalTask(\\n labeled_data=val_generator,\\n metrics=[TripletLoss()]\\n )\\n\\n training_loop = training.Loop(Siamese(),\\n train_task,\\n eval_tasks=[eval_task],\\n output_dir=output_dir,\\n random_seed=34)\\n\\n return training_loop\",\n \"_____no_output_____\"\n ],\n [\n \"train_steps = 1500\\nlr_schedule = trax.lr.warmup_and_rsqrt_decay(400, 0.01)\\ntraining_loop = train_model(Siamese, TripletLoss, lr_schedule)\\ntraining_loop.run(train_steps)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n\\n# Part 4: Evaluation\",\n \"_____no_output_____\"\n ],\n [\n \"To determine the accuracy of the model, the test set that was configured earlier is used. While the training used only positive examples, the test data, S1_test, S2_test and y_test, is setup as pairs of sentences, some of which are duplicates some are not. \\nThis routine runs all the test sentences pairs through the model, computes the cosine simlarity of each pair, thresholds it and compares the result to y_test - the correct response from the data set. The results are accumulated to produce an accuracy.\\n\\n**The ideas behind:** \\n - The model loops through the incoming data in batch_size chunks.\\n - The output vectors are computed and their cosine similarity is thresholded.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def classify(test_S1, test_S2, y, threshold, model, data_generator=data_generator, batch_size=64):\\n \\\"\\\"\\\"Function to test the model. Calculates some metrics, such as precision, accuracy, recall and F1 score.\\n\\n Args:\\n test_S1 (numpy.ndarray): Array of S1 sentences.\\n test_S2 (numpy.ndarray): Array of S2 sentences.\\n y (numpy.ndarray): Array of actual target.\\n threshold (float): Desired threshold.\\n model (trax.layers.combinators.Parallel): The Siamese model.\\n data_generator (function): Data generator function. Defaults to data_generator.\\n batch_size (int, optional): Size of the batches. Defaults to 64.\\n\\n Returns:\\n (float, float, float, float): Accuracy, precision, recall and F1 score of the model.\\n \\\"\\\"\\\"\\n\\n true_pos = 0\\n true_neg = 0\\n false_pos = 0\\n false_neg = 0\\n\\n for i in range(0, len(test_S1), batch_size):\\n to_process = len(test_S1) - i\\n\\n if to_process < batch_size:\\n batch_size = to_process\\n\\n s1, s2 = next(data_generator(test_S1[i : i + batch_size], test_S2[i : i + batch_size], batch_size, shuffle=False))\\n y_test = y[i : i + batch_size]\\n\\n v1, v2 = model((s1, s2))\\n\\n for j in range(batch_size):\\n d = np.dot(v1[j], v2[j].T)\\n res = d > threshold\\n\\n if res == 1:\\n if y_test[j] == res:\\n true_pos += 1\\n else:\\n false_pos += 1\\n else:\\n if y_test[j] == res:\\n true_neg += 1\\n else:\\n false_neg += 1\\n\\n accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)\\n precision = true_pos / (true_pos + false_pos)\\n recall = true_pos / (true_pos + false_neg)\\n f1_score = 2 * precision * recall / (precision + recall)\\n \\n return (accuracy, precision, recall, f1_score)\",\n \"_____no_output_____\"\n ],\n [\n \"print(len(S1_test))\",\n \"_____no_output_____\"\n ],\n [\n \"# Loading in the saved model\\nmodel = Siamese()\\nmodel.init_from_file(\\\"trax_model/model.pkl.gz\\\")\\n# Evaluating it\\naccuracy, precision, recall, f1_score = classify(S1_test, S2_test, y_test, 0.7, model, batch_size=512) \\nprint(\\\"Accuracy\\\", accuracy)\\nprint(\\\"Precision\\\", precision)\\nprint(\\\"Recall\\\", recall)\\nprint(\\\"F1 score\\\", f1_score)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"\\n\\n# Part 5: Making predictions\\n\\nIn this section the model will be put to work. It will be wrapped in a function called `predict` which takes two sentences as input and returns $1$ or $0$, depending on whether the pair is a duplicate or not. \\n\\nBut first, we need to embed the sentences.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def predict(sentence1, sentence2, threshold, model, data_generator=data_generator, verbose=False):\\n \\\"\\\"\\\"Function for predicting if two sentences are duplicates.\\n\\n Args:\\n sentence1 (str): First sentence.\\n sentence2 (str): Second sentence.\\n threshold (float): Desired threshold.\\n model (trax.layers.combinators.Parallel): The Siamese model.\\n data_generator (function): Data generator function. Defaults to data_generator.\\n verbose (bool, optional): If the results should be printed out. Defaults to False.\\n\\n Returns:\\n bool: True if the sentences are duplicates, False otherwise.\\n \\\"\\\"\\\"\\n\\n s1 = data_tokenizer(sentence1) # tokenize\\n S1 = np.zeros((300,))\\n for word in s1:\\n if word not in stop_words and word in vec_model.key_to_index:\\n S1 += vec_model[word]\\n S1 = S1 / len(s1)\\n \\n s2 = data_tokenizer(sentence2) # tokenize\\n S2 = np.zeros((300,))\\n for word in s2:\\n if word not in stop_words and word in vec_model.key_to_index:\\n S1 += vec_model[word]\\n S2 = S2 / len(s2)\\n\\n S1, S2 = next(data_generator([S1], [S2], 1))\\n\\n v1, v2 = model((S1, S2))\\n d = np.dot(v1[0], v2[0].T)\\n res = d > threshold\\n \\n if verbose == True:\\n print(\\\"S1 = \\\", S1, \\\"\\\\nS2 = \\\", S2)\\n print(\\\"d = \\\", d)\\n print(\\\"res = \\\", res)\\n\\n return res\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now we can test the model's ability to make predictions.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"sentence1 = \\\"I love running in the park.\\\"\\nsentence2 = \\\"I like running in park?\\\"\\n# 1 means it is duplicated, 0 otherwise\\npredict(sentence1 , sentence2, 0.7, model, verbose=True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The Siamese network is capable of catching complicated structures. Concretely, it can identify sentence duplicates although the sentences do not have many words in common.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code"],["markdown","markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458802,"cells":{"hexsha":{"kind":"string","value":"e7e2b0e4c66604e869d67cf8d4e44fdb534af2d7"},"size":{"kind":"number","value":30172,"string":"30,172"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebook/self-study/2.regression.ipynb"},"max_stars_repo_name":{"kind":"string","value":"KangByungWook/tensorflow"},"max_stars_repo_head_hexsha":{"kind":"string","value":"10e553637f9837779352f64deb5986c194bb7bfc"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"notebook/self-study/2.regression.ipynb"},"max_issues_repo_name":{"kind":"string","value":"KangByungWook/tensorflow"},"max_issues_repo_head_hexsha":{"kind":"string","value":"10e553637f9837779352f64deb5986c194bb7bfc"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"notebook/self-study/2.regression.ipynb"},"max_forks_repo_name":{"kind":"string","value":"KangByungWook/tensorflow"},"max_forks_repo_head_hexsha":{"kind":"string","value":"10e553637f9837779352f64deb5986c194bb7bfc"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":212.4788732394,"string":"212.478873"},"max_line_length":{"kind":"number","value":16104,"string":"16,104"},"alphanum_fraction":{"kind":"number","value":0.8973220204,"string":"0.897322"},"cells":{"kind":"list like","value":[[["# Regression","_____no_output_____"]],[["import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 텐서플로우 시드 설정\ntf.set_random_seed(1)\n\n# 넘파이 랜덤 시드 설정.\nnp.random.seed(1)\n\n# -1부터 1사이값을 100개로 쪼갬.\nx = np.linspace(-1, 1, 100)[:, np.newaxis] # shape (100, 1)\n\n# 0을 평균으로 하고 분산값이 0.1인 값들을 x의 크기만큼 배열로 만든다.\nnoise = np.random.normal(0, 0.1, size=x.shape)\n\n# x^2 + noise 연산.\ny = np.power(x, 2) + noise # shape (100, 1) + some noise\n\n# 차트에 그린다.\nplt.scatter(x, y)\n\n# 차트를 출력\nplt.show()\n\ntf_x = tf.placeholder(tf.float32, x.shape) # input x\ntf_y = tf.placeholder(tf.float32, y.shape) # input y\n\n# hidden layer 생성\n# relu activation function을 사용.\n# tf.layers.dense(입력, 유닛 갯수, acitvation function)\n# hidden유닛 갯수가 많아질수록 곡선이 좀더 유연해진다.\nl1 = tf.layers.dense(tf_x, 10, tf.nn.relu) # hidden layer1\n\nl2 = tf.layers.dense(l1, 5, tf.nn.relu) # hidden layer2\n\n# 출력 노드.\noutput = tf.layers.dense(l2, 1) # output layer\n\n# 노드를 거친 값과 실제값의 차이를 mean_square하여 구한다.\nloss = tf.losses.mean_squared_error(tf_y, output) # compute cost\n\n# optimizer를 생성하고.\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)\n\n# loss를 최소화하는 방향으로 optimize를 수행한다.\ntrain_op = optimizer.minimize(loss)\n\nsess = tf.Session() # 세션 생성.\nsess.run(tf.global_variables_initializer()) # 그래프의 variable타입을 초기화.\n\nplt.ion() # 새로운 차트를 생성.\nplt.show()\n\n# 학습을 100번 수행.\nfor step in range(100):\n # 실제값과 출력값을 비교하면서 loss를 최소화하는 방향으로 학습을 진행.\n _, l, pred = sess.run([train_op, loss, output], {tf_x: x, tf_y: y})\n if step % 5 == 0:\n # plot and show learning process\n plt.cla()\n plt.scatter(x, y)\n plt.plot(x, pred, 'r-', lw=5)\n plt.text(0.5, 0, 'Loss=%.4f' % l, fontdict={'size': 20, 'color': 'red'})\n # 0.1초 간격으로 시뮬레이션.\n plt.pause(0.1)\n\nplt.ioff()\nplt.show()","_____no_output_____"]]],"string":"[\n [\n [\n \"# Regression\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import tensorflow as tf\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# 텐서플로우 시드 설정\\ntf.set_random_seed(1)\\n\\n# 넘파이 랜덤 시드 설정.\\nnp.random.seed(1)\\n\\n# -1부터 1사이값을 100개로 쪼갬.\\nx = np.linspace(-1, 1, 100)[:, np.newaxis] # shape (100, 1)\\n\\n# 0을 평균으로 하고 분산값이 0.1인 값들을 x의 크기만큼 배열로 만든다.\\nnoise = np.random.normal(0, 0.1, size=x.shape)\\n\\n# x^2 + noise 연산.\\ny = np.power(x, 2) + noise # shape (100, 1) + some noise\\n\\n# 차트에 그린다.\\nplt.scatter(x, y)\\n\\n# 차트를 출력\\nplt.show()\\n\\ntf_x = tf.placeholder(tf.float32, x.shape) # input x\\ntf_y = tf.placeholder(tf.float32, y.shape) # input y\\n\\n# hidden layer 생성\\n# relu activation function을 사용.\\n# tf.layers.dense(입력, 유닛 갯수, acitvation function)\\n# hidden유닛 갯수가 많아질수록 곡선이 좀더 유연해진다.\\nl1 = tf.layers.dense(tf_x, 10, tf.nn.relu) # hidden layer1\\n\\nl2 = tf.layers.dense(l1, 5, tf.nn.relu) # hidden layer2\\n\\n# 출력 노드.\\noutput = tf.layers.dense(l2, 1) # output layer\\n\\n# 노드를 거친 값과 실제값의 차이를 mean_square하여 구한다.\\nloss = tf.losses.mean_squared_error(tf_y, output) # compute cost\\n\\n# optimizer를 생성하고.\\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)\\n\\n# loss를 최소화하는 방향으로 optimize를 수행한다.\\ntrain_op = optimizer.minimize(loss)\\n\\nsess = tf.Session() # 세션 생성.\\nsess.run(tf.global_variables_initializer()) # 그래프의 variable타입을 초기화.\\n\\nplt.ion() # 새로운 차트를 생성.\\nplt.show()\\n\\n# 학습을 100번 수행.\\nfor step in range(100):\\n # 실제값과 출력값을 비교하면서 loss를 최소화하는 방향으로 학습을 진행.\\n _, l, pred = sess.run([train_op, loss, output], {tf_x: x, tf_y: y})\\n if step % 5 == 0:\\n # plot and show learning process\\n plt.cla()\\n plt.scatter(x, y)\\n plt.plot(x, pred, 'r-', lw=5)\\n plt.text(0.5, 0, 'Loss=%.4f' % l, fontdict={'size': 20, 'color': 'red'})\\n # 0.1초 간격으로 시뮬레이션.\\n plt.pause(0.1)\\n\\nplt.ioff()\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1458803,"cells":{"hexsha":{"kind":"string","value":"e7e2b9ce353154539b6970685b0d9c8603ac4511"},"size":{"kind":"number","value":5792,"string":"5,792"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Chapter6_ObjectOriented/vector2d.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Hasideluxe/UdemyPythonPro"},"max_stars_repo_head_hexsha":{"kind":"string","value":"67f3b55ebdacc9024ffbcf4fd4d3268a9d3bd760"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Chapter6_ObjectOriented/vector2d.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Hasideluxe/UdemyPythonPro"},"max_issues_repo_head_hexsha":{"kind":"string","value":"67f3b55ebdacc9024ffbcf4fd4d3268a9d3bd760"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Chapter6_ObjectOriented/vector2d.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Hasideluxe/UdemyPythonPro"},"max_forks_repo_head_hexsha":{"kind":"string","value":"67f3b55ebdacc9024ffbcf4fd4d3268a9d3bd760"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":24.2343096234,"string":"24.23431"},"max_line_length":{"kind":"number","value":266,"string":"266"},"alphanum_fraction":{"kind":"number","value":0.4625345304,"string":"0.462535"},"cells":{"kind":"list like","value":[[["from math import sqrt\nfrom functools import total_ordering\n\n@total_ordering\nclass Vector2D:\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def __call__(self):\n print(\"Calling the __call__ function!\")\n return self.__repr__()\n\n def __repr__(self):\n return 'vector.Vector2D({}, {})'.format(self.x, self.y)\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n\n def __bool__(self):\n return bool(abs(self))\n\n def __abs__(self):\n return sqrt(pow(self.x, 2) + pow(self.y, 2))\n\n def __eq__(self, other_vector):\n if self.x == other_vector.x and self.y == other_vector.y:\n return True\n else:\n return False\n\n def __lt__(self, other_vector):\n if abs(self) < abs(other_vector):\n return True\n else:\n return False\n \n def __add__(self, other_vector):\n x = self.x + other_vector.x\n y = self.y + other_vector.y\n return Vector2D(x, y)\n\n def __add__(self, other_vector):\n x = self.x + other_vector.x\n y = self.y + other_vector.y\n return Vector2D(x, y)\n\n def __sub__(self, other_vector):\n x = self.x - other_vector.x\n y = self.y - other_vector.y\n return Vector2D(x, y)\n\n def __mul__(self, other):\n if isinstance(other, Vector2D):\n return self.x * other.x + self.y * other.y\n else:\n return Vector2D(self.x * other, self.y * other)\n\n def __truediv__(self, other):\n return Vector2D(self.x / other, self.y / other)","_____no_output_____"],["v1 = Vector2D(0, 0)\nprint(repr(v1))\nprint(str(v1))\nv2 = Vector2D(1, 1)\nprint(repr(v2))\nprint(str(v2))","vector.Vector2D(0, 0)\n(0, 0)\nvector.Vector2D(1, 1)\n(1, 1)\n"],["print(v1 + v2)\nprint(v1 - v2)\nprint(v1 * v2)\nprint(v2 / 5.0)","(1, 1)\n(-1, -1)\n0\n(0.2, 0.2)\n"],["print(abs(v2))","1.4142135623730951\n"],["print(v1 == v2)\n\nv3 = Vector2D(2, 2)\nv4 = Vector2D(2, 2)\n\nprint(v3 == v4)","False\nTrue\n"],["if v3:\n print(\"yes\")\nif v1:\n print(\"v1 - yes\")\nelse:\n print(\"v1 - no\")","yes\nv1 - no\n"],["v5 = Vector2D(2, 3)\nv6 = Vector2D(-1, 2)\n\nprint(v5 < v6)\nprint(v5 <= v6)\nprint(v5 > v6)\nprint(v5 >= v6)\nprint(v5 == v6)\nprint(v5 != v6)","False\nFalse\nTrue\nTrue\nFalse\nTrue\n"],["v5()","_____no_output_____"]]],"string":"[\n [\n [\n \"from math import sqrt\\nfrom functools import total_ordering\\n\\n@total_ordering\\nclass Vector2D:\\n def __init__(self, x=0, y=0):\\n self.x = x\\n self.y = y\\n\\n def __call__(self):\\n print(\\\"Calling the __call__ function!\\\")\\n return self.__repr__()\\n\\n def __repr__(self):\\n return 'vector.Vector2D({}, {})'.format(self.x, self.y)\\n\\n def __str__(self):\\n return '({}, {})'.format(self.x, self.y)\\n\\n def __bool__(self):\\n return bool(abs(self))\\n\\n def __abs__(self):\\n return sqrt(pow(self.x, 2) + pow(self.y, 2))\\n\\n def __eq__(self, other_vector):\\n if self.x == other_vector.x and self.y == other_vector.y:\\n return True\\n else:\\n return False\\n\\n def __lt__(self, other_vector):\\n if abs(self) < abs(other_vector):\\n return True\\n else:\\n return False\\n \\n def __add__(self, other_vector):\\n x = self.x + other_vector.x\\n y = self.y + other_vector.y\\n return Vector2D(x, y)\\n\\n def __add__(self, other_vector):\\n x = self.x + other_vector.x\\n y = self.y + other_vector.y\\n return Vector2D(x, y)\\n\\n def __sub__(self, other_vector):\\n x = self.x - other_vector.x\\n y = self.y - other_vector.y\\n return Vector2D(x, y)\\n\\n def __mul__(self, other):\\n if isinstance(other, Vector2D):\\n return self.x * other.x + self.y * other.y\\n else:\\n return Vector2D(self.x * other, self.y * other)\\n\\n def __truediv__(self, other):\\n return Vector2D(self.x / other, self.y / other)\",\n \"_____no_output_____\"\n ],\n [\n \"v1 = Vector2D(0, 0)\\nprint(repr(v1))\\nprint(str(v1))\\nv2 = Vector2D(1, 1)\\nprint(repr(v2))\\nprint(str(v2))\",\n \"vector.Vector2D(0, 0)\\n(0, 0)\\nvector.Vector2D(1, 1)\\n(1, 1)\\n\"\n ],\n [\n \"print(v1 + v2)\\nprint(v1 - v2)\\nprint(v1 * v2)\\nprint(v2 / 5.0)\",\n \"(1, 1)\\n(-1, -1)\\n0\\n(0.2, 0.2)\\n\"\n ],\n [\n \"print(abs(v2))\",\n \"1.4142135623730951\\n\"\n ],\n [\n \"print(v1 == v2)\\n\\nv3 = Vector2D(2, 2)\\nv4 = Vector2D(2, 2)\\n\\nprint(v3 == v4)\",\n \"False\\nTrue\\n\"\n ],\n [\n \"if v3:\\n print(\\\"yes\\\")\\nif v1:\\n print(\\\"v1 - yes\\\")\\nelse:\\n print(\\\"v1 - no\\\")\",\n \"yes\\nv1 - no\\n\"\n ],\n [\n \"v5 = Vector2D(2, 3)\\nv6 = Vector2D(-1, 2)\\n\\nprint(v5 < v6)\\nprint(v5 <= v6)\\nprint(v5 > v6)\\nprint(v5 >= v6)\\nprint(v5 == v6)\\nprint(v5 != v6)\",\n \"False\\nFalse\\nTrue\\nTrue\\nFalse\\nTrue\\n\"\n ],\n [\n \"v5()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458804,"cells":{"hexsha":{"kind":"string","value":"e7e2c51195667ec4dde3657c95b42a140e8fdd35"},"size":{"kind":"number","value":605006,"string":"605,006"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"tutorials/Bert_SQUAD_Interpret.ipynb"},"max_stars_repo_name":{"kind":"string","value":"cspanda/captum"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8ec1f0e48b652676a8690d087751d86a4e9939b4"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-02-03T00:43:17.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-02-03T00:43:17.000Z"},"max_issues_repo_path":{"kind":"string","value":"tutorials/Bert_SQUAD_Interpret.ipynb"},"max_issues_repo_name":{"kind":"string","value":"cspanda/captum"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8ec1f0e48b652676a8690d087751d86a4e9939b4"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":9,"string":"9"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-06-28T20:24:29.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-27T09:39:32.000Z"},"max_forks_repo_path":{"kind":"string","value":"tutorials/Bert_SQUAD_Interpret.ipynb"},"max_forks_repo_name":{"kind":"string","value":"cspanda/captum"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8ec1f0e48b652676a8690d087751d86a4e9939b4"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-09-26T01:31:46.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-09-26T01:31:46.000Z"},"avg_line_length":{"kind":"number","value":471.9235569423,"string":"471.923557"},"max_line_length":{"kind":"number","value":243852,"string":"243,852"},"alphanum_fraction":{"kind":"number","value":0.9286602116,"string":"0.92866"},"cells":{"kind":"list like","value":[[["# Interpreting BERT Models (Part 1)","_____no_output_____"],["In this notebook we demonstrate how to interpret Bert models using `Captum` library. In this particular case study we focus on a fine-tuned Question Answering model on SQUAD dataset using transformers library from Hugging Face: https://huggingface.co/transformers/\n\nWe show how to use interpretation hooks to examine and better understand embeddings, sub-embeddings, bert, and attention layers. \n\nNote: Before running this tutorial, please install `seaborn`, `pandas` and `matplotlib`, `transformers`(from hugging face) python packages.","_____no_output_____"]],[["import os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\n\nfrom transformers import BertTokenizer, BertForQuestionAnswering, BertConfig\n\nfrom captum.attr import visualization as viz\nfrom captum.attr import IntegratedGradients, LayerConductance, LayerIntegratedGradients\nfrom captum.attr import configure_interpretable_embedding_layer, remove_interpretable_embedding_layer","_____no_output_____"],["device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")","_____no_output_____"]],[["The first step is to fine-tune BERT model on SQUAD dataset. This can be easiy accomplished by following the steps described in hugging face's official web site: https://github.com/huggingface/transformers#run_squadpy-fine-tuning-on-squad-for-question-answering \n\nNote that the fine-tuning is done on a `bert-base-uncased` pre-trained model.","_____no_output_____"],["After we pretrain the model, we can load the tokenizer and pre-trained BERT model using the commands described below. ","_____no_output_____"]],[["# replace with the real path of the saved model\nmodel_path = ''\n\n# load model\nmodel = BertForQuestionAnswering.from_pretrained(model_path)\nmodel.to(device)\nmodel.eval()\nmodel.zero_grad()\n\n# load tokenizer\ntokenizer = BertTokenizer.from_pretrained(model_path)","_____no_output_____"]],[["A helper function to perform forward pass of the model and make predictions.","_____no_output_____"]],[["def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):\n return model(inputs, token_type_ids=token_type_ids,\n position_ids=position_ids, attention_mask=attention_mask, )","_____no_output_____"]],[["Defining a custom forward function that will allow us to access the start and end postitions of our prediction using `position` input argument.","_____no_output_____"]],[["def squad_pos_forward_func(inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0):\n pred = predict(inputs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n attention_mask=attention_mask)\n pred = pred[position]\n return pred.max(1).values","_____no_output_____"]],[["Let's compute attributions with respect to the `BertEmbeddings` layer.\n\nTo do so, we need to define baselines / references, numericalize both the baselines and the inputs. We will define helper functions to achieve that.\n\nThe cell below defines numericalized special tokens that will be later used for constructing inputs and corresponding baselines/references.","_____no_output_____"]],[["ref_token_id = tokenizer.pad_token_id # A token used for generating token reference\nsep_token_id = tokenizer.sep_token_id # A token used as a separator between question and text and it is also added to the end of the text.\ncls_token_id = tokenizer.cls_token_id # A token used for prepending to the concatenated question-text word sequence","_____no_output_____"]],[["Below we define a set of helper function for constructing references / baselines for word tokens, token types and position ids. We also provide separate helper functions that allow to construct the sub-embeddings and corresponding baselines / references for all sub-embeddings of `BertEmbeddings` layer.","_____no_output_____"]],[["def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):\n question_ids = tokenizer.encode(question, add_special_tokens=False)\n text_ids = tokenizer.encode(text, add_special_tokens=False)\n\n # construct input token ids\n input_ids = [cls_token_id] + question_ids + [sep_token_id] + text_ids + [sep_token_id]\n\n # construct reference token ids \n ref_input_ids = [cls_token_id] + [ref_token_id] * len(question_ids) + [sep_token_id] + \\\n [ref_token_id] * len(text_ids) + [sep_token_id]\n\n return torch.tensor([input_ids], device=device), torch.tensor([ref_input_ids], device=device), len(question_ids)\n\ndef construct_input_ref_token_type_pair(input_ids, sep_ind=0):\n seq_len = input_ids.size(1)\n token_type_ids = torch.tensor([[0 if i <= sep_ind else 1 for i in range(seq_len)]], device=device)\n ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)# * -1\n return token_type_ids, ref_token_type_ids\n\ndef construct_input_ref_pos_id_pair(input_ids):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n # we could potentially also use random permutation with `torch.randperm(seq_length, device=device)`\n ref_position_ids = torch.zeros(seq_length, dtype=torch.long, device=device)\n\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n ref_position_ids = ref_position_ids.unsqueeze(0).expand_as(input_ids)\n return position_ids, ref_position_ids\n \ndef construct_attention_mask(input_ids):\n return torch.ones_like(input_ids)\n\ndef construct_bert_sub_embedding(input_ids, ref_input_ids,\n token_type_ids, ref_token_type_ids,\n position_ids, ref_position_ids):\n input_embeddings = interpretable_embedding1.indices_to_embeddings(input_ids)\n ref_input_embeddings = interpretable_embedding1.indices_to_embeddings(ref_input_ids)\n\n input_embeddings_token_type = interpretable_embedding2.indices_to_embeddings(token_type_ids)\n ref_input_embeddings_token_type = interpretable_embedding2.indices_to_embeddings(ref_token_type_ids)\n\n input_embeddings_position_ids = interpretable_embedding3.indices_to_embeddings(position_ids)\n ref_input_embeddings_position_ids = interpretable_embedding3.indices_to_embeddings(ref_position_ids)\n \n return (input_embeddings, ref_input_embeddings), \\\n (input_embeddings_token_type, ref_input_embeddings_token_type), \\\n (input_embeddings_position_ids, ref_input_embeddings_position_ids)\n \ndef construct_whole_bert_embeddings(input_ids, ref_input_ids, \\\n token_type_ids=None, ref_token_type_ids=None, \\\n position_ids=None, ref_position_ids=None):\n input_embeddings = interpretable_embedding.indices_to_embeddings(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\n ref_input_embeddings = interpretable_embedding.indices_to_embeddings(ref_input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\n \n return input_embeddings, ref_input_embeddings\n","_____no_output_____"]],[["Let's define the `question - text` pair that we'd like to use as an input for our Bert model and interpret what the model was forcusing on when predicting an answer to the question from given input text ","_____no_output_____"]],[["question, text = \"What is important to us?\", \"It is important to us to include, empower and support humans of all kinds.\"","_____no_output_____"]],[["Let's numericalize the question, the input text and generate corresponding baselines / references for all three sub-embeddings (word, token type and position embeddings) types using our helper functions defined above.","_____no_output_____"]],[["input_ids, ref_input_ids, sep_id = construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id)\ntoken_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id)\nposition_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)\nattention_mask = construct_attention_mask(input_ids)\n\nindices = input_ids[0].detach().tolist()\nall_tokens = tokenizer.convert_ids_to_tokens(indices)","_____no_output_____"]],[["Also, let's define the ground truth for prediction's start and end positions.","_____no_output_____"]],[["ground_truth = 'to include, empower and support humans of all kinds'\n\nground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)\nground_truth_end_ind = indices.index(ground_truth_tokens[-1])\nground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1","_____no_output_____"]],[["Now let's make predictions using input, token type, position id and a default attention mask.","_____no_output_____"]],[["start_scores, end_scores = predict(input_ids, \\\n token_type_ids=token_type_ids, \\\n position_ids=position_ids, \\\n attention_mask=attention_mask)\n\n\nprint('Question: ', question)\nprint('Predicted Answer: ', ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))","Question: What is important to us?\nPredicted Answer: to include , em ##power and support humans of all kinds\n"]],[["There are two different ways of computing the attributions for `BertEmbeddings` layer. One option is to use `LayerIntegratedGradients` and compute the attributions with respect to that layer. The second option is to pre-compute the embeddings and wrap the actual embeddings with `InterpretableEmbeddingBase`. The pre-computation of embeddings for the second option is necessary because integrated gradients scales the inputs and that won't be meaningful on the level of word / token indices.\n\nSince using `LayerIntegratedGradients` is simpler, let's use it here.","_____no_output_____"]],[["lig = LayerIntegratedGradients(squad_pos_forward_func, model.bert.embeddings)\n\nattributions_start, delta_start = lig.attribute(inputs=input_ids,\n baselines=ref_input_ids,\n additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),\n return_convergence_delta=True)\nattributions_end, delta_end = lig.attribute(inputs=input_ids, baselines=ref_input_ids,\n additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),\n return_convergence_delta=True)","_____no_output_____"]],[["A helper function to summarize attributions for each word token in the sequence.","_____no_output_____"]],[["def summarize_attributions(attributions):\n attributions = attributions.sum(dim=-1).squeeze(0)\n attributions = attributions / torch.norm(attributions)\n return attributions","_____no_output_____"],["attributions_start_sum = summarize_attributions(attributions_start)\nattributions_end_sum = summarize_attributions(attributions_end)","_____no_output_____"],["# storing couple samples in an array for visualization purposes\nstart_position_vis = viz.VisualizationDataRecord(\n attributions_start_sum,\n torch.max(torch.softmax(start_scores[0], dim=0)),\n torch.argmax(start_scores),\n torch.argmax(start_scores),\n str(ground_truth_start_ind),\n attributions_start_sum.sum(), \n all_tokens,\n delta_start)\n\nend_position_vis = viz.VisualizationDataRecord(\n attributions_end_sum,\n torch.max(torch.softmax(end_scores[0], dim=0)),\n torch.argmax(end_scores),\n torch.argmax(end_scores),\n str(ground_truth_end_ind),\n attributions_end_sum.sum(), \n all_tokens,\n delta_end)\n\nprint('\\033[1m', 'Visualizations For Start Position', '\\033[0m')\nviz.visualize_text([start_position_vis])\n\nprint('\\033[1m', 'Visualizations For End Position', '\\033[0m')\nviz.visualize_text([end_position_vis])","\u001b[1m Visualizations For Start Position \u001b[0m\n"],["from IPython.display import Image\nImage(filename='img/bert/visuals_of_start_end_predictions.png')","_____no_output_____"]],[["From the results above we can tell that for predicting start position our model is focusing more on the question side. More specifically on the tokens `what` and `important`. It has also slight focus on the token sequence `to us` in the text side.\n\nIn contrast to that, for predicting end position, our model focuses more on the text side and has relative high attribution on the last end position token `kinds`.","_____no_output_____"],["# Multi-Embedding attribution","_____no_output_____"],["Now let's look into the sub-embeddings of `BerEmbeddings` and try to understand the contributions and roles of each of them for both start and end predicted positions.\n\nTo do so, we'd need to place interpretation hooks in each three of them.\n\nNote that we could perform attribution by using `LayerIntegratedGradients` as well but in that case we have to call attribute three times for each sub-layer since currently `LayerIntegratedGradients` takes only a layer at a time. In the future we plan to support multi-layer attribution and will be able to perform attribution by only calling attribute once. \n\n`configure_interpretable_embedding_layer` function will help us to place interpretation hooks on each sub-layer. It returns `InterpretableEmbeddingBase` layer for each sub-embedding and can be used to access the embedding vectors. \n\nNote that we need to remove InterpretableEmbeddingBase wrapper from our model using remove_interpretable_embedding_layer function after we finish interpretation.\n","_____no_output_____"]],[["interpretable_embedding1 = configure_interpretable_embedding_layer(model, 'bert.embeddings.word_embeddings')\ninterpretable_embedding2 = configure_interpretable_embedding_layer(model, 'bert.embeddings.token_type_embeddings')\ninterpretable_embedding3 = configure_interpretable_embedding_layer(model, 'bert.embeddings.position_embeddings')","_____no_output_____"]],[["`BertEmbeddings` has three sub-embeddings, namely, `word_embeddings`, `token_type_embeddings` and `position_embeddings` and this time we would like to attribute to each of them independently.\n`construct_bert_sub_embedding` helper function helps us to construct input embeddings and corresponding references in a separation.","_____no_output_____"]],[["(input_embed, ref_input_embed), (token_type_ids_embed, ref_token_type_ids_embed), (position_ids_embed, ref_position_ids_embed) = construct_bert_sub_embedding(input_ids, ref_input_ids, \\\n token_type_ids=token_type_ids, ref_token_type_ids=ref_token_type_ids, \\\n position_ids=position_ids, ref_position_ids=ref_position_ids)","_____no_output_____"]],[["Now let's create an instance of `IntegratedGradients` and compute the attributions with respect to all those embeddings both for the start and end positions and summarize them for each word token.","_____no_output_____"]],[["ig = IntegratedGradients(squad_pos_forward_func)\n\nattributions_start = ig.attribute(inputs=(input_embed, token_type_ids_embed, position_ids_embed),\n baselines=(ref_input_embed, ref_token_type_ids_embed, ref_position_ids_embed),\n additional_forward_args=(attention_mask, 0))\nattributions_end = ig.attribute(inputs=(input_embed, token_type_ids_embed, position_ids_embed),\n baselines=(ref_input_embed, ref_token_type_ids_embed, ref_position_ids_embed),\n additional_forward_args=(attention_mask, 1))\n\nattributions_start_word = summarize_attributions(attributions_start[0])\nattributions_end_word = summarize_attributions(attributions_end[0])\n\nattributions_start_token_type = summarize_attributions(attributions_start[1])\nattributions_end_token_type = summarize_attributions(attributions_end[1])\n\nattributions_start_position = summarize_attributions(attributions_start[2])\nattributions_end_position = summarize_attributions(attributions_end[2])\n","_____no_output_____"]],[["An auxilary function that will help us to compute topk attributions and corresponding indices","_____no_output_____"]],[["def get_topk_attributed_tokens(attrs, k=5):\n values, indices = torch.topk(attrs, k)\n top_tokens = [all_tokens[idx] for idx in indices]\n return top_tokens, values, indices","_____no_output_____"]],[["Removing interpretation hooks from all layers after finishing attribution.","_____no_output_____"]],[["remove_interpretable_embedding_layer(model, interpretable_embedding1)\nremove_interpretable_embedding_layer(model, interpretable_embedding2)\nremove_interpretable_embedding_layer(model, interpretable_embedding3)","_____no_output_____"]],[["Computing topk attributions for all sub-embeddings and placing them in pandas dataframes for better visualization.","_____no_output_____"]],[["top_words_start, top_words_val_start, top_word_ind_start = get_topk_attributed_tokens(attributions_start_word)\ntop_words_end, top_words_val_end, top_words_ind_end = get_topk_attributed_tokens(attributions_end_word)\n\ntop_token_type_start, top_token_type_val_start, top_token_type_ind_start = get_topk_attributed_tokens(attributions_start_token_type)\ntop_token_type_end, top_token_type_val_end, top_token_type_ind_end = get_topk_attributed_tokens(attributions_end_token_type)\n\ntop_pos_start, top_pos_val_start, pos_ind_start = get_topk_attributed_tokens(attributions_start_position)\ntop_pos_end, top_pos_val_end, pos_ind_end = get_topk_attributed_tokens(attributions_end_position)\n\ndf_start = pd.DataFrame({'Word(Index), Attribution': [\"{} ({}), {}\".format(word, pos, round(val.item(),2)) for word, pos, val in zip(top_words_start, top_word_ind_start, top_words_val_start)],\n 'Token Type(Index), Attribution': [\"{} ({}), {}\".format(ttype, pos, round(val.item(),2)) for ttype, pos, val in zip(top_token_type_start, top_token_type_ind_start, top_words_val_start)],\n 'Position(Index), Attribution': [\"{} ({}), {}\".format(position, pos, round(val.item(),2)) for position, pos, val in zip(top_pos_start, pos_ind_start, top_pos_val_start)]})\ndf_start.style.apply(['cell_ids: False'])\n\ndf_end = pd.DataFrame({'Word(Index), Attribution': [\"{} ({}), {}\".format(word, pos, round(val.item(),2)) for word, pos, val in zip(top_words_end, top_words_ind_end, top_words_val_end)],\n 'Token Type(Index), Attribution': [\"{} ({}), {}\".format(ttype, pos, round(val.item(),2)) for ttype, pos, val in zip(top_token_type_end, top_token_type_ind_end, top_words_val_end)],\n 'Position(Index), Attribution': [\"{} ({}), {}\".format(position, pos, round(val.item(),2)) for position, pos, val in zip(top_pos_end, pos_ind_end, top_pos_val_end)]})\ndf_end.style.apply(['cell_ids: False'])\n\n['{}({})'.format(token, str(i)) for i, token in enumerate(all_tokens)]","_____no_output_____"]],[["Below we can see top 5 attribution results from all three embedding types in predicting start positions.","_____no_output_____"],["#### Top 5 attributed embeddings for start position","_____no_output_____"]],[["df_start","_____no_output_____"]],[["Word embeddings help to focus more on the surrounding tokens of the predicted answer's start position to such as em, ##power and ,. It also has high attribution for the tokens in the question such as what and ?.\n\nIn contrast to to word embedding, token embedding type focuses more on the tokens in the text part such as important,em and start token to.\n\nPosition embedding also has high attribution score for the tokens surrounding to such as us and important. In addition to that, similar to word embedding we observe important tokens from the question.\n\nWe can perform similar analysis, and visualize top 5 attributed tokens for all three embedding types, also for the end position prediction.\n","_____no_output_____"],["#### Top 5 attributed embeddings for end position","_____no_output_____"]],[["df_end","_____no_output_____"]],[["It is interesting to observe high concentration of highly attributed tokens such as `of`, `kinds`, `support` and `##power` for end position prediction.\n\nThe token `kinds`, which is the correct predicted token appears to have high attribution score both according word and position embeddings.\n","_____no_output_____"],["# Interpreting Bert Layers","_____no_output_____"],["Now let's look into the layers of our network. More specifically we would like to look into the distribution of attribution scores for each token across all layers in Bert model and dive deeper into specific tokens. \nWe do that using one of layer attribution algorithms, namely, layer conductance. However, we encourage you to try out and compare the results with other algorithms as well.\n\n\nLet's configure `InterpretableEmbeddingsBase` again, in this case in order to interpret the layers of our model.","_____no_output_____"]],[["interpretable_embedding = configure_interpretable_embedding_layer(model, 'bert.embeddings')","_____no_output_____"]],[["Let's iterate over all layers and compute the attributions for all tokens. In addition to that let's also choose a specific token that we would like to examine in detail, specified by an id `token_to_explain` and store related information in a separate array.\n\n\nNote: Since below code is iterating over all layers it can take over 5 seconds. Please be patient!","_____no_output_____"]],[["layer_attrs_start = []\nlayer_attrs_end = []\n\n# The token that we would like to examine separately.\ntoken_to_explain = 23 # the index of the token that we would like to examine more thoroughly\nlayer_attrs_start_dist = []\nlayer_attrs_end_dist = []\n\ninput_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(input_ids, ref_input_ids, \\\n token_type_ids=token_type_ids, ref_token_type_ids=ref_token_type_ids, \\\n position_ids=position_ids, ref_position_ids=ref_position_ids)\n\nfor i in range(model.config.num_hidden_layers):\n lc = LayerConductance(squad_pos_forward_func, model.bert.encoder.layer[i])\n layer_attributions_start = lc.attribute(inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(token_type_ids, position_ids,attention_mask, 0))[0]\n layer_attributions_end = lc.attribute(inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(token_type_ids, position_ids,attention_mask, 1))[0]\n \n layer_attrs_start.append(summarize_attributions(layer_attributions_start).cpu().detach().tolist())\n layer_attrs_end.append(summarize_attributions(layer_attributions_end).cpu().detach().tolist())\n\n # storing attributions of the token id that we would like to examine in more detail in token_to_explain\n layer_attrs_start_dist.append(layer_attributions_start[0,token_to_explain,:].cpu().detach().tolist())\n layer_attrs_end_dist.append(layer_attributions_end[0,token_to_explain,:].cpu().detach().tolist())\n","_____no_output_____"]],[["The plot below represents a heat map of attributions across all layers and tokens for the start position prediction. \nIt is interesting to observe that the question word `what` gains increasingly high attribution from layer one to nine. In the last three layers that importance is slowly diminishing. \nIn contrary to `what` token, many other tokens have negative or close to zero attribution in the first 6 layers. \n\nWe start seeing slightly higher attribution in tokens `important`, `us` and `to`. Interestingly token `em` is also assigned high attribution score which is remarkably high the last three layers.\nAnd lastly, our correctly predicted token `to` for the start position gains increasingly positive attribution has relatively high attribution especially in the last two layers.\n","_____no_output_____"]],[["fig, ax = plt.subplots(figsize=(15,5))\nxticklabels=all_tokens\nyticklabels=list(range(1,13))\nax = sns.heatmap(np.array(layer_attrs_start), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)\nplt.xlabel('Tokens')\nplt.ylabel('Layers')\nplt.show()","_____no_output_____"]],[["Now let's examine the heat map of the attributions for the end position prediction. In the case of end position prediction we again observe high attribution scores for the token `what` in the last 11 layers.\nThe correctly predicted end token `kinds` has positive attribution across all layers and it is especially prominent in the last two layers.","_____no_output_____"]],[["fig, ax = plt.subplots(figsize=(15,5))\n\nxticklabels=all_tokens\nyticklabels=list(range(1,13))\nax = sns.heatmap(np.array(layer_attrs_end), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2) #, annot=True\nplt.xlabel('Tokens')\nplt.ylabel('Layers')\n\nplt.show()","_____no_output_____"]],[["It is interesting to note that when we compare the heat maps of start and end position, overall the colors for start position prediction on the map have darker intensities. This implies that there are less tokens that attribute positively to the start position prediction and there are more tokens which are negative indicators or signals of start position prediction.","_____no_output_____"],["Now let's dig deeper into specific tokens and look into the distribution of attributions per layer for the token `kinds` in the start and end positions. The box plot diagram below shows the presence of outliers especially in the first four layers and in layer 8. We also observe that for start position prediction interquartile range slowly decreases as we go deeper into the layers and finally it is dimishing.\n\n","_____no_output_____"]],[["fig, ax = plt.subplots(figsize=(20,10))\nax = sns.boxplot(data=layer_attrs_start_dist)\nplt.xlabel('Layers')\nplt.ylabel('Attribution')\nplt.show()","_____no_output_____"]],[["Now let's plot same distribution but for the prediction of the end position. Here attribution has larger positive values across all layers and the interquartile range doesn't change much when moving deeper into the layers.","_____no_output_____"]],[["fig, ax = plt.subplots(figsize=(20,10))\nax = sns.boxplot(data=layer_attrs_end_dist)\nplt.xlabel('Layers')\nplt.ylabel('Attribution')\nplt.show()","_____no_output_____"]],[["Now, let's remove interpretation hooks, since we finished interpretation at this point","_____no_output_____"]],[["remove_interpretable_embedding_layer(model, interpretable_embedding)","_____no_output_____"]],[["In addition to that we can also look into the distribution of attributions in each layer for any input token. This will help us to better understand and compare the distributional patterns of attributions across multiple layers. We can for example represent attributions as a probability density function (pdf) and compute the entropy of it in order to estimate the entropy of attributions in each layer. This can be easily computed using a histogram.","_____no_output_____"]],[["def pdf_attr(attrs, bins=100):\n return np.histogram(attrs, bins=bins, density=True)[0]","_____no_output_____"]],[["In this particular case let's compute the pdf for the attributions at end positions `kinds`. We can however do it for all tokens.\n\nWe will compute and visualize the pdfs and entropies using Shannon's Entropy measure for each layer for token `kinds`.","_____no_output_____"]],[["layer_attrs_end_pdf = map(lambda layer_attrs_end_dist: pdf_attr(layer_attrs_end_dist), layer_attrs_end_dist)\nlayer_attrs_end_pdf = np.array(list(layer_attrs_end_pdf))\n\n# summing attribution along embedding diemension for each layer\n# size: #layers\nattr_sum = np.array(layer_attrs_end_dist).sum(-1)\n\n# size: #layers\nlayer_attrs_end_pdf_norm = np.linalg.norm(layer_attrs_end_pdf, axis=-1, ord=1)\n\n#size: #bins x #layers\nlayer_attrs_end_pdf = np.transpose(layer_attrs_end_pdf)\n\n#size: #bins x #layers\nlayer_attrs_end_pdf = np.divide(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=layer_attrs_end_pdf_norm!=0)","_____no_output_____"]],[["The plot below visualizes the probability mass function (pmf) of attributions for each layer for the end position token `kinds`. From the plot we can observe that the distributions are taking bell-curved shapes with different means and variances.\nWe can now use attribution pdfs to compute entropies in the next cell.","_____no_output_____"]],[["fig, ax = plt.subplots(figsize=(20,10))\nplt.plot(layer_attrs_end_pdf)\nplt.xlabel('Bins')\nplt.ylabel('Density')\nplt.legend(['Layer '+ str(i) for i in range(1,13)])\nplt.show()","_____no_output_____"]],[["Below we calculate and visualize attribution entropies based on Shannon entropy measure where the x-axis corresponds to the number of layers and the y-axis corresponds to the total attribution in that layer. The size of the circles for each (layer, total_attribution) pair correspond to the normalized entropy value at that point.\n\nIn this particular example, we observe that the entropy doesn't change much from layer to layer, however in a general case entropy can provide us an intuition about the distributional characteristics of attributions in each layer and can be useful especially when comparing it across multiple tokens.\n","_____no_output_____"]],[["fig, ax = plt.subplots(figsize=(20,10))\n\n# replacing 0s with 1s. np.log(1) = 0 and np.log(0) = -inf\nlayer_attrs_end_pdf[layer_attrs_end_pdf == 0] = 1\nlayer_attrs_end_pdf_log = np.log2(layer_attrs_end_pdf)\n\n# size: #layers\nentropies= -(layer_attrs_end_pdf * layer_attrs_end_pdf_log).sum(0)\n\nplt.scatter(np.arange(12), attr_sum, s=entropies * 100)\nplt.xlabel('Layers')\nplt.ylabel('Total Attribution')\nplt.show()","_____no_output_____"]],[["In the Part 2 of this tutorial we will to go deeper into attention layers, heads and compare the attributions with the attention weight matrices, study and discuss related statistics.","_____no_output_____"]]],"string":"[\n [\n [\n \"# Interpreting BERT Models (Part 1)\",\n \"_____no_output_____\"\n ],\n [\n \"In this notebook we demonstrate how to interpret Bert models using `Captum` library. In this particular case study we focus on a fine-tuned Question Answering model on SQUAD dataset using transformers library from Hugging Face: https://huggingface.co/transformers/\\n\\nWe show how to use interpretation hooks to examine and better understand embeddings, sub-embeddings, bert, and attention layers. \\n\\nNote: Before running this tutorial, please install `seaborn`, `pandas` and `matplotlib`, `transformers`(from hugging face) python packages.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\\nimport sys\\n\\nimport numpy as np\\nimport pandas as pd\\nimport seaborn as sns\\nimport matplotlib.pyplot as plt\\n\\nimport torch\\nimport torch.nn as nn\\n\\nfrom transformers import BertTokenizer, BertForQuestionAnswering, BertConfig\\n\\nfrom captum.attr import visualization as viz\\nfrom captum.attr import IntegratedGradients, LayerConductance, LayerIntegratedGradients\\nfrom captum.attr import configure_interpretable_embedding_layer, remove_interpretable_embedding_layer\",\n \"_____no_output_____\"\n ],\n [\n \"device = torch.device(\\\"cuda:0\\\" if torch.cuda.is_available() else \\\"cpu\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The first step is to fine-tune BERT model on SQUAD dataset. This can be easiy accomplished by following the steps described in hugging face's official web site: https://github.com/huggingface/transformers#run_squadpy-fine-tuning-on-squad-for-question-answering \\n\\nNote that the fine-tuning is done on a `bert-base-uncased` pre-trained model.\",\n \"_____no_output_____\"\n ],\n [\n \"After we pretrain the model, we can load the tokenizer and pre-trained BERT model using the commands described below. \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# replace with the real path of the saved model\\nmodel_path = ''\\n\\n# load model\\nmodel = BertForQuestionAnswering.from_pretrained(model_path)\\nmodel.to(device)\\nmodel.eval()\\nmodel.zero_grad()\\n\\n# load tokenizer\\ntokenizer = BertTokenizer.from_pretrained(model_path)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"A helper function to perform forward pass of the model and make predictions.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):\\n return model(inputs, token_type_ids=token_type_ids,\\n position_ids=position_ids, attention_mask=attention_mask, )\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Defining a custom forward function that will allow us to access the start and end postitions of our prediction using `position` input argument.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def squad_pos_forward_func(inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0):\\n pred = predict(inputs,\\n token_type_ids=token_type_ids,\\n position_ids=position_ids,\\n attention_mask=attention_mask)\\n pred = pred[position]\\n return pred.max(1).values\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let's compute attributions with respect to the `BertEmbeddings` layer.\\n\\nTo do so, we need to define baselines / references, numericalize both the baselines and the inputs. We will define helper functions to achieve that.\\n\\nThe cell below defines numericalized special tokens that will be later used for constructing inputs and corresponding baselines/references.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"ref_token_id = tokenizer.pad_token_id # A token used for generating token reference\\nsep_token_id = tokenizer.sep_token_id # A token used as a separator between question and text and it is also added to the end of the text.\\ncls_token_id = tokenizer.cls_token_id # A token used for prepending to the concatenated question-text word sequence\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Below we define a set of helper function for constructing references / baselines for word tokens, token types and position ids. We also provide separate helper functions that allow to construct the sub-embeddings and corresponding baselines / references for all sub-embeddings of `BertEmbeddings` layer.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):\\n question_ids = tokenizer.encode(question, add_special_tokens=False)\\n text_ids = tokenizer.encode(text, add_special_tokens=False)\\n\\n # construct input token ids\\n input_ids = [cls_token_id] + question_ids + [sep_token_id] + text_ids + [sep_token_id]\\n\\n # construct reference token ids \\n ref_input_ids = [cls_token_id] + [ref_token_id] * len(question_ids) + [sep_token_id] + \\\\\\n [ref_token_id] * len(text_ids) + [sep_token_id]\\n\\n return torch.tensor([input_ids], device=device), torch.tensor([ref_input_ids], device=device), len(question_ids)\\n\\ndef construct_input_ref_token_type_pair(input_ids, sep_ind=0):\\n seq_len = input_ids.size(1)\\n token_type_ids = torch.tensor([[0 if i <= sep_ind else 1 for i in range(seq_len)]], device=device)\\n ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)# * -1\\n return token_type_ids, ref_token_type_ids\\n\\ndef construct_input_ref_pos_id_pair(input_ids):\\n seq_length = input_ids.size(1)\\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\\n # we could potentially also use random permutation with `torch.randperm(seq_length, device=device)`\\n ref_position_ids = torch.zeros(seq_length, dtype=torch.long, device=device)\\n\\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\\n ref_position_ids = ref_position_ids.unsqueeze(0).expand_as(input_ids)\\n return position_ids, ref_position_ids\\n \\ndef construct_attention_mask(input_ids):\\n return torch.ones_like(input_ids)\\n\\ndef construct_bert_sub_embedding(input_ids, ref_input_ids,\\n token_type_ids, ref_token_type_ids,\\n position_ids, ref_position_ids):\\n input_embeddings = interpretable_embedding1.indices_to_embeddings(input_ids)\\n ref_input_embeddings = interpretable_embedding1.indices_to_embeddings(ref_input_ids)\\n\\n input_embeddings_token_type = interpretable_embedding2.indices_to_embeddings(token_type_ids)\\n ref_input_embeddings_token_type = interpretable_embedding2.indices_to_embeddings(ref_token_type_ids)\\n\\n input_embeddings_position_ids = interpretable_embedding3.indices_to_embeddings(position_ids)\\n ref_input_embeddings_position_ids = interpretable_embedding3.indices_to_embeddings(ref_position_ids)\\n \\n return (input_embeddings, ref_input_embeddings), \\\\\\n (input_embeddings_token_type, ref_input_embeddings_token_type), \\\\\\n (input_embeddings_position_ids, ref_input_embeddings_position_ids)\\n \\ndef construct_whole_bert_embeddings(input_ids, ref_input_ids, \\\\\\n token_type_ids=None, ref_token_type_ids=None, \\\\\\n position_ids=None, ref_position_ids=None):\\n input_embeddings = interpretable_embedding.indices_to_embeddings(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\\n ref_input_embeddings = interpretable_embedding.indices_to_embeddings(ref_input_ids, token_type_ids=token_type_ids, position_ids=position_ids)\\n \\n return input_embeddings, ref_input_embeddings\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let's define the `question - text` pair that we'd like to use as an input for our Bert model and interpret what the model was forcusing on when predicting an answer to the question from given input text \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"question, text = \\\"What is important to us?\\\", \\\"It is important to us to include, empower and support humans of all kinds.\\\"\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let's numericalize the question, the input text and generate corresponding baselines / references for all three sub-embeddings (word, token type and position embeddings) types using our helper functions defined above.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"input_ids, ref_input_ids, sep_id = construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id)\\ntoken_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id)\\nposition_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)\\nattention_mask = construct_attention_mask(input_ids)\\n\\nindices = input_ids[0].detach().tolist()\\nall_tokens = tokenizer.convert_ids_to_tokens(indices)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Also, let's define the ground truth for prediction's start and end positions.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"ground_truth = 'to include, empower and support humans of all kinds'\\n\\nground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)\\nground_truth_end_ind = indices.index(ground_truth_tokens[-1])\\nground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now let's make predictions using input, token type, position id and a default attention mask.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"start_scores, end_scores = predict(input_ids, \\\\\\n token_type_ids=token_type_ids, \\\\\\n position_ids=position_ids, \\\\\\n attention_mask=attention_mask)\\n\\n\\nprint('Question: ', question)\\nprint('Predicted Answer: ', ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))\",\n \"Question: What is important to us?\\nPredicted Answer: to include , em ##power and support humans of all kinds\\n\"\n ]\n ],\n [\n [\n \"There are two different ways of computing the attributions for `BertEmbeddings` layer. One option is to use `LayerIntegratedGradients` and compute the attributions with respect to that layer. The second option is to pre-compute the embeddings and wrap the actual embeddings with `InterpretableEmbeddingBase`. The pre-computation of embeddings for the second option is necessary because integrated gradients scales the inputs and that won't be meaningful on the level of word / token indices.\\n\\nSince using `LayerIntegratedGradients` is simpler, let's use it here.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"lig = LayerIntegratedGradients(squad_pos_forward_func, model.bert.embeddings)\\n\\nattributions_start, delta_start = lig.attribute(inputs=input_ids,\\n baselines=ref_input_ids,\\n additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),\\n return_convergence_delta=True)\\nattributions_end, delta_end = lig.attribute(inputs=input_ids, baselines=ref_input_ids,\\n additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),\\n return_convergence_delta=True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"A helper function to summarize attributions for each word token in the sequence.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def summarize_attributions(attributions):\\n attributions = attributions.sum(dim=-1).squeeze(0)\\n attributions = attributions / torch.norm(attributions)\\n return attributions\",\n \"_____no_output_____\"\n ],\n [\n \"attributions_start_sum = summarize_attributions(attributions_start)\\nattributions_end_sum = summarize_attributions(attributions_end)\",\n \"_____no_output_____\"\n ],\n [\n \"# storing couple samples in an array for visualization purposes\\nstart_position_vis = viz.VisualizationDataRecord(\\n attributions_start_sum,\\n torch.max(torch.softmax(start_scores[0], dim=0)),\\n torch.argmax(start_scores),\\n torch.argmax(start_scores),\\n str(ground_truth_start_ind),\\n attributions_start_sum.sum(), \\n all_tokens,\\n delta_start)\\n\\nend_position_vis = viz.VisualizationDataRecord(\\n attributions_end_sum,\\n torch.max(torch.softmax(end_scores[0], dim=0)),\\n torch.argmax(end_scores),\\n torch.argmax(end_scores),\\n str(ground_truth_end_ind),\\n attributions_end_sum.sum(), \\n all_tokens,\\n delta_end)\\n\\nprint('\\\\033[1m', 'Visualizations For Start Position', '\\\\033[0m')\\nviz.visualize_text([start_position_vis])\\n\\nprint('\\\\033[1m', 'Visualizations For End Position', '\\\\033[0m')\\nviz.visualize_text([end_position_vis])\",\n \"\\u001b[1m Visualizations For Start Position \\u001b[0m\\n\"\n ],\n [\n \"from IPython.display import Image\\nImage(filename='img/bert/visuals_of_start_end_predictions.png')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"From the results above we can tell that for predicting start position our model is focusing more on the question side. More specifically on the tokens `what` and `important`. It has also slight focus on the token sequence `to us` in the text side.\\n\\nIn contrast to that, for predicting end position, our model focuses more on the text side and has relative high attribution on the last end position token `kinds`.\",\n \"_____no_output_____\"\n ],\n [\n \"# Multi-Embedding attribution\",\n \"_____no_output_____\"\n ],\n [\n \"Now let's look into the sub-embeddings of `BerEmbeddings` and try to understand the contributions and roles of each of them for both start and end predicted positions.\\n\\nTo do so, we'd need to place interpretation hooks in each three of them.\\n\\nNote that we could perform attribution by using `LayerIntegratedGradients` as well but in that case we have to call attribute three times for each sub-layer since currently `LayerIntegratedGradients` takes only a layer at a time. In the future we plan to support multi-layer attribution and will be able to perform attribution by only calling attribute once. \\n\\n`configure_interpretable_embedding_layer` function will help us to place interpretation hooks on each sub-layer. It returns `InterpretableEmbeddingBase` layer for each sub-embedding and can be used to access the embedding vectors. \\n\\nNote that we need to remove InterpretableEmbeddingBase wrapper from our model using remove_interpretable_embedding_layer function after we finish interpretation.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"interpretable_embedding1 = configure_interpretable_embedding_layer(model, 'bert.embeddings.word_embeddings')\\ninterpretable_embedding2 = configure_interpretable_embedding_layer(model, 'bert.embeddings.token_type_embeddings')\\ninterpretable_embedding3 = configure_interpretable_embedding_layer(model, 'bert.embeddings.position_embeddings')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"`BertEmbeddings` has three sub-embeddings, namely, `word_embeddings`, `token_type_embeddings` and `position_embeddings` and this time we would like to attribute to each of them independently.\\n`construct_bert_sub_embedding` helper function helps us to construct input embeddings and corresponding references in a separation.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"(input_embed, ref_input_embed), (token_type_ids_embed, ref_token_type_ids_embed), (position_ids_embed, ref_position_ids_embed) = construct_bert_sub_embedding(input_ids, ref_input_ids, \\\\\\n token_type_ids=token_type_ids, ref_token_type_ids=ref_token_type_ids, \\\\\\n position_ids=position_ids, ref_position_ids=ref_position_ids)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now let's create an instance of `IntegratedGradients` and compute the attributions with respect to all those embeddings both for the start and end positions and summarize them for each word token.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"ig = IntegratedGradients(squad_pos_forward_func)\\n\\nattributions_start = ig.attribute(inputs=(input_embed, token_type_ids_embed, position_ids_embed),\\n baselines=(ref_input_embed, ref_token_type_ids_embed, ref_position_ids_embed),\\n additional_forward_args=(attention_mask, 0))\\nattributions_end = ig.attribute(inputs=(input_embed, token_type_ids_embed, position_ids_embed),\\n baselines=(ref_input_embed, ref_token_type_ids_embed, ref_position_ids_embed),\\n additional_forward_args=(attention_mask, 1))\\n\\nattributions_start_word = summarize_attributions(attributions_start[0])\\nattributions_end_word = summarize_attributions(attributions_end[0])\\n\\nattributions_start_token_type = summarize_attributions(attributions_start[1])\\nattributions_end_token_type = summarize_attributions(attributions_end[1])\\n\\nattributions_start_position = summarize_attributions(attributions_start[2])\\nattributions_end_position = summarize_attributions(attributions_end[2])\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"An auxilary function that will help us to compute topk attributions and corresponding indices\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def get_topk_attributed_tokens(attrs, k=5):\\n values, indices = torch.topk(attrs, k)\\n top_tokens = [all_tokens[idx] for idx in indices]\\n return top_tokens, values, indices\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Removing interpretation hooks from all layers after finishing attribution.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"remove_interpretable_embedding_layer(model, interpretable_embedding1)\\nremove_interpretable_embedding_layer(model, interpretable_embedding2)\\nremove_interpretable_embedding_layer(model, interpretable_embedding3)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Computing topk attributions for all sub-embeddings and placing them in pandas dataframes for better visualization.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"top_words_start, top_words_val_start, top_word_ind_start = get_topk_attributed_tokens(attributions_start_word)\\ntop_words_end, top_words_val_end, top_words_ind_end = get_topk_attributed_tokens(attributions_end_word)\\n\\ntop_token_type_start, top_token_type_val_start, top_token_type_ind_start = get_topk_attributed_tokens(attributions_start_token_type)\\ntop_token_type_end, top_token_type_val_end, top_token_type_ind_end = get_topk_attributed_tokens(attributions_end_token_type)\\n\\ntop_pos_start, top_pos_val_start, pos_ind_start = get_topk_attributed_tokens(attributions_start_position)\\ntop_pos_end, top_pos_val_end, pos_ind_end = get_topk_attributed_tokens(attributions_end_position)\\n\\ndf_start = pd.DataFrame({'Word(Index), Attribution': [\\\"{} ({}), {}\\\".format(word, pos, round(val.item(),2)) for word, pos, val in zip(top_words_start, top_word_ind_start, top_words_val_start)],\\n 'Token Type(Index), Attribution': [\\\"{} ({}), {}\\\".format(ttype, pos, round(val.item(),2)) for ttype, pos, val in zip(top_token_type_start, top_token_type_ind_start, top_words_val_start)],\\n 'Position(Index), Attribution': [\\\"{} ({}), {}\\\".format(position, pos, round(val.item(),2)) for position, pos, val in zip(top_pos_start, pos_ind_start, top_pos_val_start)]})\\ndf_start.style.apply(['cell_ids: False'])\\n\\ndf_end = pd.DataFrame({'Word(Index), Attribution': [\\\"{} ({}), {}\\\".format(word, pos, round(val.item(),2)) for word, pos, val in zip(top_words_end, top_words_ind_end, top_words_val_end)],\\n 'Token Type(Index), Attribution': [\\\"{} ({}), {}\\\".format(ttype, pos, round(val.item(),2)) for ttype, pos, val in zip(top_token_type_end, top_token_type_ind_end, top_words_val_end)],\\n 'Position(Index), Attribution': [\\\"{} ({}), {}\\\".format(position, pos, round(val.item(),2)) for position, pos, val in zip(top_pos_end, pos_ind_end, top_pos_val_end)]})\\ndf_end.style.apply(['cell_ids: False'])\\n\\n['{}({})'.format(token, str(i)) for i, token in enumerate(all_tokens)]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Below we can see top 5 attribution results from all three embedding types in predicting start positions.\",\n \"_____no_output_____\"\n ],\n [\n \"#### Top 5 attributed embeddings for start position\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df_start\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Word embeddings help to focus more on the surrounding tokens of the predicted answer's start position to such as em, ##power and ,. It also has high attribution for the tokens in the question such as what and ?.\\n\\nIn contrast to to word embedding, token embedding type focuses more on the tokens in the text part such as important,em and start token to.\\n\\nPosition embedding also has high attribution score for the tokens surrounding to such as us and important. In addition to that, similar to word embedding we observe important tokens from the question.\\n\\nWe can perform similar analysis, and visualize top 5 attributed tokens for all three embedding types, also for the end position prediction.\\n\",\n \"_____no_output_____\"\n ],\n [\n \"#### Top 5 attributed embeddings for end position\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df_end\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"It is interesting to observe high concentration of highly attributed tokens such as `of`, `kinds`, `support` and `##power` for end position prediction.\\n\\nThe token `kinds`, which is the correct predicted token appears to have high attribution score both according word and position embeddings.\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Interpreting Bert Layers\",\n \"_____no_output_____\"\n ],\n [\n \"Now let's look into the layers of our network. More specifically we would like to look into the distribution of attribution scores for each token across all layers in Bert model and dive deeper into specific tokens. \\nWe do that using one of layer attribution algorithms, namely, layer conductance. However, we encourage you to try out and compare the results with other algorithms as well.\\n\\n\\nLet's configure `InterpretableEmbeddingsBase` again, in this case in order to interpret the layers of our model.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"interpretable_embedding = configure_interpretable_embedding_layer(model, 'bert.embeddings')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Let's iterate over all layers and compute the attributions for all tokens. In addition to that let's also choose a specific token that we would like to examine in detail, specified by an id `token_to_explain` and store related information in a separate array.\\n\\n\\nNote: Since below code is iterating over all layers it can take over 5 seconds. Please be patient!\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"layer_attrs_start = []\\nlayer_attrs_end = []\\n\\n# The token that we would like to examine separately.\\ntoken_to_explain = 23 # the index of the token that we would like to examine more thoroughly\\nlayer_attrs_start_dist = []\\nlayer_attrs_end_dist = []\\n\\ninput_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(input_ids, ref_input_ids, \\\\\\n token_type_ids=token_type_ids, ref_token_type_ids=ref_token_type_ids, \\\\\\n position_ids=position_ids, ref_position_ids=ref_position_ids)\\n\\nfor i in range(model.config.num_hidden_layers):\\n lc = LayerConductance(squad_pos_forward_func, model.bert.encoder.layer[i])\\n layer_attributions_start = lc.attribute(inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(token_type_ids, position_ids,attention_mask, 0))[0]\\n layer_attributions_end = lc.attribute(inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(token_type_ids, position_ids,attention_mask, 1))[0]\\n \\n layer_attrs_start.append(summarize_attributions(layer_attributions_start).cpu().detach().tolist())\\n layer_attrs_end.append(summarize_attributions(layer_attributions_end).cpu().detach().tolist())\\n\\n # storing attributions of the token id that we would like to examine in more detail in token_to_explain\\n layer_attrs_start_dist.append(layer_attributions_start[0,token_to_explain,:].cpu().detach().tolist())\\n layer_attrs_end_dist.append(layer_attributions_end[0,token_to_explain,:].cpu().detach().tolist())\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The plot below represents a heat map of attributions across all layers and tokens for the start position prediction. \\nIt is interesting to observe that the question word `what` gains increasingly high attribution from layer one to nine. In the last three layers that importance is slowly diminishing. \\nIn contrary to `what` token, many other tokens have negative or close to zero attribution in the first 6 layers. \\n\\nWe start seeing slightly higher attribution in tokens `important`, `us` and `to`. Interestingly token `em` is also assigned high attribution score which is remarkably high the last three layers.\\nAnd lastly, our correctly predicted token `to` for the start position gains increasingly positive attribution has relatively high attribution especially in the last two layers.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig, ax = plt.subplots(figsize=(15,5))\\nxticklabels=all_tokens\\nyticklabels=list(range(1,13))\\nax = sns.heatmap(np.array(layer_attrs_start), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)\\nplt.xlabel('Tokens')\\nplt.ylabel('Layers')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now let's examine the heat map of the attributions for the end position prediction. In the case of end position prediction we again observe high attribution scores for the token `what` in the last 11 layers.\\nThe correctly predicted end token `kinds` has positive attribution across all layers and it is especially prominent in the last two layers.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig, ax = plt.subplots(figsize=(15,5))\\n\\nxticklabels=all_tokens\\nyticklabels=list(range(1,13))\\nax = sns.heatmap(np.array(layer_attrs_end), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2) #, annot=True\\nplt.xlabel('Tokens')\\nplt.ylabel('Layers')\\n\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"It is interesting to note that when we compare the heat maps of start and end position, overall the colors for start position prediction on the map have darker intensities. This implies that there are less tokens that attribute positively to the start position prediction and there are more tokens which are negative indicators or signals of start position prediction.\",\n \"_____no_output_____\"\n ],\n [\n \"Now let's dig deeper into specific tokens and look into the distribution of attributions per layer for the token `kinds` in the start and end positions. The box plot diagram below shows the presence of outliers especially in the first four layers and in layer 8. We also observe that for start position prediction interquartile range slowly decreases as we go deeper into the layers and finally it is dimishing.\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig, ax = plt.subplots(figsize=(20,10))\\nax = sns.boxplot(data=layer_attrs_start_dist)\\nplt.xlabel('Layers')\\nplt.ylabel('Attribution')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now let's plot same distribution but for the prediction of the end position. Here attribution has larger positive values across all layers and the interquartile range doesn't change much when moving deeper into the layers.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig, ax = plt.subplots(figsize=(20,10))\\nax = sns.boxplot(data=layer_attrs_end_dist)\\nplt.xlabel('Layers')\\nplt.ylabel('Attribution')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Now, let's remove interpretation hooks, since we finished interpretation at this point\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"remove_interpretable_embedding_layer(model, interpretable_embedding)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In addition to that we can also look into the distribution of attributions in each layer for any input token. This will help us to better understand and compare the distributional patterns of attributions across multiple layers. We can for example represent attributions as a probability density function (pdf) and compute the entropy of it in order to estimate the entropy of attributions in each layer. This can be easily computed using a histogram.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def pdf_attr(attrs, bins=100):\\n return np.histogram(attrs, bins=bins, density=True)[0]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In this particular case let's compute the pdf for the attributions at end positions `kinds`. We can however do it for all tokens.\\n\\nWe will compute and visualize the pdfs and entropies using Shannon's Entropy measure for each layer for token `kinds`.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"layer_attrs_end_pdf = map(lambda layer_attrs_end_dist: pdf_attr(layer_attrs_end_dist), layer_attrs_end_dist)\\nlayer_attrs_end_pdf = np.array(list(layer_attrs_end_pdf))\\n\\n# summing attribution along embedding diemension for each layer\\n# size: #layers\\nattr_sum = np.array(layer_attrs_end_dist).sum(-1)\\n\\n# size: #layers\\nlayer_attrs_end_pdf_norm = np.linalg.norm(layer_attrs_end_pdf, axis=-1, ord=1)\\n\\n#size: #bins x #layers\\nlayer_attrs_end_pdf = np.transpose(layer_attrs_end_pdf)\\n\\n#size: #bins x #layers\\nlayer_attrs_end_pdf = np.divide(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=layer_attrs_end_pdf_norm!=0)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"The plot below visualizes the probability mass function (pmf) of attributions for each layer for the end position token `kinds`. From the plot we can observe that the distributions are taking bell-curved shapes with different means and variances.\\nWe can now use attribution pdfs to compute entropies in the next cell.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig, ax = plt.subplots(figsize=(20,10))\\nplt.plot(layer_attrs_end_pdf)\\nplt.xlabel('Bins')\\nplt.ylabel('Density')\\nplt.legend(['Layer '+ str(i) for i in range(1,13)])\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Below we calculate and visualize attribution entropies based on Shannon entropy measure where the x-axis corresponds to the number of layers and the y-axis corresponds to the total attribution in that layer. The size of the circles for each (layer, total_attribution) pair correspond to the normalized entropy value at that point.\\n\\nIn this particular example, we observe that the entropy doesn't change much from layer to layer, however in a general case entropy can provide us an intuition about the distributional characteristics of attributions in each layer and can be useful especially when comparing it across multiple tokens.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig, ax = plt.subplots(figsize=(20,10))\\n\\n# replacing 0s with 1s. np.log(1) = 0 and np.log(0) = -inf\\nlayer_attrs_end_pdf[layer_attrs_end_pdf == 0] = 1\\nlayer_attrs_end_pdf_log = np.log2(layer_attrs_end_pdf)\\n\\n# size: #layers\\nentropies= -(layer_attrs_end_pdf * layer_attrs_end_pdf_log).sum(0)\\n\\nplt.scatter(np.arange(12), attr_sum, s=entropies * 100)\\nplt.xlabel('Layers')\\nplt.ylabel('Total Attribution')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"In the Part 2 of this tutorial we will to go deeper into attention layers, heads and compare the attributions with the attention weight matrices, study and discuss related statistics.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code","code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code","code"],["markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown","markdown"],["code"],["markdown","markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458805,"cells":{"hexsha":{"kind":"string","value":"e7e2c856520828e81bdfa488cf290f32e35bf8c6"},"size":{"kind":"number","value":13642,"string":"13,642"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"spk_vq_cae.ipynb"},"max_stars_repo_name":{"kind":"string","value":"tong-wu-umn/spike-compression-autoencoder"},"max_stars_repo_head_hexsha":{"kind":"string","value":"83746fbe21c6c8c06291b5ddb0cdf7f37ed00b3d"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-12T09:22:55.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-11-25T19:53:40.000Z"},"max_issues_repo_path":{"kind":"string","value":"spk_vq_cae.ipynb"},"max_issues_repo_name":{"kind":"string","value":"tong-wu-umn/spike-compression-autoencoder"},"max_issues_repo_head_hexsha":{"kind":"string","value":"83746fbe21c6c8c06291b5ddb0cdf7f37ed00b3d"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"spk_vq_cae.ipynb"},"max_forks_repo_name":{"kind":"string","value":"tong-wu-umn/spike-compression-autoencoder"},"max_forks_repo_head_hexsha":{"kind":"string","value":"83746fbe21c6c8c06291b5ddb0cdf7f37ed00b3d"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":30.4508928571,"string":"30.450893"},"max_line_length":{"kind":"number","value":129,"string":"129"},"alphanum_fraction":{"kind":"number","value":0.5574695792,"string":"0.55747"},"cells":{"kind":"list like","value":[[["from __future__ import print_function\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport line_profiler\nimport scipy.io as sio\nimport math\nimport collections\n\nimport torch\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, BatchSampler\nfrom sklearn.metrics import mean_squared_error\n\nfrom model.model_v2 import spk_vq_vae_resnet\nfrom model.utils import SpikeDataset\n\ngpu = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")","_____no_output_____"]],[["## Parameter Configuration","_____no_output_____"]],[["# %% global parameters\nspk_ch = 4\nspk_dim = 64 # for Wave_Clus\n# spk_dim = 48 # for HC1 and Neuropixels\nlog_interval = 10\nbeta = 0.15\nvq_num = 128\ncardinality = 32\ndropRate = 0.2\nbatch_size = 48\ntest_batch_size = 1000\n\n\"\"\"\norg_dim = param[0]\nconv1_ch = param[1]\nconv2_ch = param[2]\nconv0_ker = param[3]\nconv1_ker = param[4]\nconv2_ker = param[5]\nself.vq_dim = param[6]\nself.vq_num = param[7]\ncardinality = param[8]\ndropRate = param[9]\n\"\"\"\nparam_resnet_v2 = [spk_ch, 256, 16, 1, 3, 1, int(spk_dim/4), vq_num, cardinality, dropRate]","_____no_output_____"]],[["## Preparing data loaders","_____no_output_____"]],[["noise_file = './data/noisy_spks.mat'\nclean_file = './data/clean_spks.mat'\n\nargs = collections.namedtuple\n\n# training set purposely distorted to train denoising autoencoder\nargs.data_path = noise_file\nargs.train_portion = .5\nargs.train_mode = True\ntrain_noise = SpikeDataset(args)\n\n# clean dataset for training\nargs.data_path = clean_file\nargs.train_portion = .5\nargs.train_mode = True\ntrain_clean = SpikeDataset(args)\n\n# noisy datast for training\nargs.data_path = noise_file\nargs.train_portion = .5\nargs.train_mode = False\ntest_noise = SpikeDataset(args)\n\n# clean dataset for testing\nargs.data_path = clean_file\nargs.train_portion = .5\nargs.train_mode = False\ntest_clean = SpikeDataset(args)\n\nbatch_cnt = int(math.ceil(len(train_noise) / batch_size))\n\n# normalization\nd_mean, d_std = train_clean.get_normalizer()\n\ntrain_clean.apply_norm(d_mean, d_std)\ntrain_noise.apply_norm(d_mean, d_std)\ntest_clean.apply_norm(d_mean, d_std)\ntest_noise.apply_norm(d_mean, d_std)","_____no_output_____"]],[["## Model definition","_____no_output_____"]],[["# %% create model\nmodel = spk_vq_vae_resnet(param_resnet_v2).to(gpu)\n\n# %% loss and optimization function\ndef loss_function(recon_x, x, commit_loss, vq_loss):\n recon_loss = F.mse_loss(recon_x, x, reduction='sum')\n return recon_loss + beta * commit_loss + vq_loss, recon_loss\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4, amsgrad=True)","_____no_output_____"],["def train(epoch):\n model.train()\n train_loss = 0\n batch_sampler = BatchSampler(RandomSampler(range(len(train_noise))), batch_size=batch_size, drop_last=False)\n for batch_idx, ind in enumerate(batch_sampler):\n in_data = train_noise[ind].to(gpu)\n out_data = train_clean[ind].to(gpu)\n\n optimizer.zero_grad()\n recon_batch, commit_loss, vq_loss = model(in_data)\n loss, recon_loss = loss_function(recon_batch, out_data, commit_loss, vq_loss)\n loss.backward(retain_graph=True)\n model.bwd()\n optimizer.step()\n \n train_loss += recon_loss.item() / (spk_dim * spk_ch)\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.4f}'.format(\n epoch, batch_idx * len(in_data), len(train_noise),\n 100. * batch_idx / batch_cnt, recon_loss.item()))\n \n average_train_loss = train_loss / len(train_noise)\n print('====> Epoch: {} Average train loss: {:.5f}'.format(\n epoch, average_train_loss))\n return average_train_loss","_____no_output_____"],["# model logging\nbest_val_loss = 10\ncur_train_loss = 1\ndef save_model(val_loss, train_loss):\n\tglobal best_val_loss, cur_train_loss\n\tif val_loss < best_val_loss:\n\t\tbest_val_loss = val_loss\n\t\tcur_train_loss = train_loss\n\t\ttorch.save(model.state_dict(), './spk_vq_vae_temp.pt')","_____no_output_____"],["def test(epoch, test_mode=True):\n if test_mode:\n model.eval()\n model.embed_reset()\n test_loss = 0\n recon_sig = torch.rand(1, spk_ch, spk_dim)\n org_sig = torch.rand(1, spk_ch, spk_dim)\n with torch.no_grad():\n batch_sampler = BatchSampler(RandomSampler(range(len(test_noise))), batch_size=test_batch_size, drop_last=False)\n for batch_idx, ind in enumerate(batch_sampler):\n in_data = test_noise[ind].to(gpu)\n out_data = test_clean[ind].to(gpu)\n\n recon_batch, commit_loss, vq_loss = model(in_data)\n _, recon_loss = loss_function(recon_batch, out_data, commit_loss, vq_loss)\n \n recon_sig = torch.cat((recon_sig, recon_batch.data.cpu()), dim=0)\n org_sig = torch.cat((org_sig, out_data.data.cpu()), dim=0)\n \n test_loss += recon_loss.item() / (spk_dim * spk_ch)\n\n average_test_loss = test_loss / len(test_noise)\n print('====> Epoch: {} Average test loss: {:.5f}'.format(\n epoch, average_test_loss))\n\n if epoch % 10 == 0:\n plt.figure(figsize=(7,5))\n plt.bar(np.arange(vq_num), model.embed_freq / model.embed_freq.sum())\n plt.ylabel('Probability of Activation', fontsize=16)\n plt.xlabel('Index of codewords', fontsize=16)\n plt.show()\n\n return average_test_loss, recon_sig[1:], org_sig[1:]","_____no_output_____"]],[["## Training","_____no_output_____"]],[["train_loss_history = []\ntest_loss_history = []\n\nepochs = 500\nstart_time = time.time()\n\nfor epoch in range(1, epochs + 1):\n train_loss = train(epoch)\n test_loss, _, _ = test(epoch)\n save_model(test_loss, train_loss)\n \n train_loss_history.append(train_loss)\n test_loss_history.append(test_loss)\n \nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nprint('Minimal train/testing losses are {:.4f} and {:.4f} with index {}\\n'\n .format(cur_train_loss, best_val_loss, test_loss_history.index(min(test_loss_history))))\n\n# plot train and test loss history over epochs\nplt.figure(1)\nepoch_axis = range(1, len(train_loss_history) + 1)\nplt.plot(epoch_axis, train_loss_history, 'bo')\nplt.plot(epoch_axis, test_loss_history, 'b+')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.show()","_____no_output_____"]],[["## Result evaluation","_____no_output_____"],["### a. Visualization of mostly used VQ vectors","_____no_output_____"]],[["# select the best performing model\nmodel.load_state_dict(torch.load('./spk_vq_vae_temp.pt'))\n\nembed_idx = np.argsort(model.embed_freq)\nembed_sort = model.embed.weight.data.cpu().numpy()[embed_idx]\n\n# Visualizing activation pattern of VQ codes on testing dataset (the first 8 mostly activated)\nplt.figure()\nn_row, n_col = 1, 8\nf, axarr = plt.subplots(n_row, n_col, figsize=(n_col*2, n_row*2))\nfor i in range(8):\n axarr[i].plot(embed_sort[i], 'r')\n axarr[i].axis('off')\nplt.show()","_____no_output_____"]],[["### b. Compression ratio","_____no_output_____"]],[["# %% spike recon\ntrain_mean, train_std = torch.from_numpy(d_mean), torch.from_numpy(d_std)\n_, val_spks, test_spks = test(10)\n\n# calculate compression ratio\nvq_freq = model.embed_freq / sum(model.embed_freq)\nvq_freq = vq_freq[vq_freq != 0]\nvq_log2 = np.log2(vq_freq)\nbits = -sum(np.multiply(vq_freq, vq_log2))\ncr = spk_ch * spk_dim * 16 / (param_resnet_v2[2] * bits)\nprint('compression ratio is {:.2f} with {:.2f}-bit.'.format(cr, bits))","_____no_output_____"]],[["### c. MSE error","_____no_output_____"]],[["recon_spks = val_spks * train_std + train_mean\ntest_spks_v2 = test_spks * train_std + train_mean\n\nrecon_spks = recon_spks.view(-1, spk_dim)\ntest_spks_v2 = test_spks_v2.view(-1, spk_dim)\n\nrecon_err = torch.norm(recon_spks-test_spks_v2, p=2, dim=1) / torch.norm(test_spks_v2, p=2, dim=1)\n\nprint('mean of recon_err is {:.4f}'.format(torch.mean(recon_err)))\nprint('std of recon_err is {:.4f}'.format(torch.std(recon_err)))","_____no_output_____"]],[["### d. SNDR of reconstructed spikes","_____no_output_____"]],[["recon_spks_new = recon_spks.numpy()\ntest_spks_new = test_spks_v2.numpy()\n\ndef cal_sndr(org_data, recon_data):\n org_norm = np.linalg.norm(org_data, axis=1)\n err_norm = np.linalg.norm(org_data-recon_data, axis=1)\n return np.mean(20*np.log10(org_norm / err_norm)), np.std(20*np.log10(org_norm / err_norm))\n\ncur_sndr, sndr_std = cal_sndr(test_spks_new, recon_spks_new)\nprint('SNDR is {:.4f} with std {:.4f}'.format(cur_sndr, sndr_std))","_____no_output_____"]],[["### e. Visualization of reconstructed spikes chosen at random","_____no_output_____"]],[["rand_val_idx = np.random.permutation(len(recon_spks_new))\n\nplt.figure()\nn_row, n_col = 3, 8\nspks_to_show = test_spks_new[rand_val_idx[:n_row*n_col]]\nymax, ymin = np.amax(spks_to_show), np.amin(spks_to_show)\nf, axarr = plt.subplots(n_row, n_col, figsize=(n_col*3, n_row*3))\nfor i in range(n_row):\n for j in range(n_col):\n axarr[i, j].plot(recon_spks_new[rand_val_idx[i*n_col+j]], 'r')\n axarr[i, j].plot(test_spks_new[rand_val_idx[i*n_col+j]], 'b')\n axarr[i, j].set_ylim([ymin*1.1, ymax*1.1])\n axarr[i, j].axis('off')\nplt.show()","_____no_output_____"]]],"string":"[\n [\n [\n \"from __future__ import print_function\\nimport numpy as np\\nimport time\\nimport matplotlib.pyplot as plt\\nimport line_profiler\\nimport scipy.io as sio\\nimport math\\nimport collections\\n\\nimport torch\\nfrom torch import optim\\nfrom torch.autograd import Variable\\nfrom torch.nn import functional as F\\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, BatchSampler\\nfrom sklearn.metrics import mean_squared_error\\n\\nfrom model.model_v2 import spk_vq_vae_resnet\\nfrom model.utils import SpikeDataset\\n\\ngpu = torch.device(\\\"cuda:0\\\" if torch.cuda.is_available() else \\\"cpu\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Parameter Configuration\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# %% global parameters\\nspk_ch = 4\\nspk_dim = 64 # for Wave_Clus\\n# spk_dim = 48 # for HC1 and Neuropixels\\nlog_interval = 10\\nbeta = 0.15\\nvq_num = 128\\ncardinality = 32\\ndropRate = 0.2\\nbatch_size = 48\\ntest_batch_size = 1000\\n\\n\\\"\\\"\\\"\\norg_dim = param[0]\\nconv1_ch = param[1]\\nconv2_ch = param[2]\\nconv0_ker = param[3]\\nconv1_ker = param[4]\\nconv2_ker = param[5]\\nself.vq_dim = param[6]\\nself.vq_num = param[7]\\ncardinality = param[8]\\ndropRate = param[9]\\n\\\"\\\"\\\"\\nparam_resnet_v2 = [spk_ch, 256, 16, 1, 3, 1, int(spk_dim/4), vq_num, cardinality, dropRate]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Preparing data loaders\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"noise_file = './data/noisy_spks.mat'\\nclean_file = './data/clean_spks.mat'\\n\\nargs = collections.namedtuple\\n\\n# training set purposely distorted to train denoising autoencoder\\nargs.data_path = noise_file\\nargs.train_portion = .5\\nargs.train_mode = True\\ntrain_noise = SpikeDataset(args)\\n\\n# clean dataset for training\\nargs.data_path = clean_file\\nargs.train_portion = .5\\nargs.train_mode = True\\ntrain_clean = SpikeDataset(args)\\n\\n# noisy datast for training\\nargs.data_path = noise_file\\nargs.train_portion = .5\\nargs.train_mode = False\\ntest_noise = SpikeDataset(args)\\n\\n# clean dataset for testing\\nargs.data_path = clean_file\\nargs.train_portion = .5\\nargs.train_mode = False\\ntest_clean = SpikeDataset(args)\\n\\nbatch_cnt = int(math.ceil(len(train_noise) / batch_size))\\n\\n# normalization\\nd_mean, d_std = train_clean.get_normalizer()\\n\\ntrain_clean.apply_norm(d_mean, d_std)\\ntrain_noise.apply_norm(d_mean, d_std)\\ntest_clean.apply_norm(d_mean, d_std)\\ntest_noise.apply_norm(d_mean, d_std)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Model definition\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# %% create model\\nmodel = spk_vq_vae_resnet(param_resnet_v2).to(gpu)\\n\\n# %% loss and optimization function\\ndef loss_function(recon_x, x, commit_loss, vq_loss):\\n recon_loss = F.mse_loss(recon_x, x, reduction='sum')\\n return recon_loss + beta * commit_loss + vq_loss, recon_loss\\n\\noptimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4, amsgrad=True)\",\n \"_____no_output_____\"\n ],\n [\n \"def train(epoch):\\n model.train()\\n train_loss = 0\\n batch_sampler = BatchSampler(RandomSampler(range(len(train_noise))), batch_size=batch_size, drop_last=False)\\n for batch_idx, ind in enumerate(batch_sampler):\\n in_data = train_noise[ind].to(gpu)\\n out_data = train_clean[ind].to(gpu)\\n\\n optimizer.zero_grad()\\n recon_batch, commit_loss, vq_loss = model(in_data)\\n loss, recon_loss = loss_function(recon_batch, out_data, commit_loss, vq_loss)\\n loss.backward(retain_graph=True)\\n model.bwd()\\n optimizer.step()\\n \\n train_loss += recon_loss.item() / (spk_dim * spk_ch)\\n if batch_idx % log_interval == 0:\\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\\\tLoss: {:.4f}'.format(\\n epoch, batch_idx * len(in_data), len(train_noise),\\n 100. * batch_idx / batch_cnt, recon_loss.item()))\\n \\n average_train_loss = train_loss / len(train_noise)\\n print('====> Epoch: {} Average train loss: {:.5f}'.format(\\n epoch, average_train_loss))\\n return average_train_loss\",\n \"_____no_output_____\"\n ],\n [\n \"# model logging\\nbest_val_loss = 10\\ncur_train_loss = 1\\ndef save_model(val_loss, train_loss):\\n\\tglobal best_val_loss, cur_train_loss\\n\\tif val_loss < best_val_loss:\\n\\t\\tbest_val_loss = val_loss\\n\\t\\tcur_train_loss = train_loss\\n\\t\\ttorch.save(model.state_dict(), './spk_vq_vae_temp.pt')\",\n \"_____no_output_____\"\n ],\n [\n \"def test(epoch, test_mode=True):\\n if test_mode:\\n model.eval()\\n model.embed_reset()\\n test_loss = 0\\n recon_sig = torch.rand(1, spk_ch, spk_dim)\\n org_sig = torch.rand(1, spk_ch, spk_dim)\\n with torch.no_grad():\\n batch_sampler = BatchSampler(RandomSampler(range(len(test_noise))), batch_size=test_batch_size, drop_last=False)\\n for batch_idx, ind in enumerate(batch_sampler):\\n in_data = test_noise[ind].to(gpu)\\n out_data = test_clean[ind].to(gpu)\\n\\n recon_batch, commit_loss, vq_loss = model(in_data)\\n _, recon_loss = loss_function(recon_batch, out_data, commit_loss, vq_loss)\\n \\n recon_sig = torch.cat((recon_sig, recon_batch.data.cpu()), dim=0)\\n org_sig = torch.cat((org_sig, out_data.data.cpu()), dim=0)\\n \\n test_loss += recon_loss.item() / (spk_dim * spk_ch)\\n\\n average_test_loss = test_loss / len(test_noise)\\n print('====> Epoch: {} Average test loss: {:.5f}'.format(\\n epoch, average_test_loss))\\n\\n if epoch % 10 == 0:\\n plt.figure(figsize=(7,5))\\n plt.bar(np.arange(vq_num), model.embed_freq / model.embed_freq.sum())\\n plt.ylabel('Probability of Activation', fontsize=16)\\n plt.xlabel('Index of codewords', fontsize=16)\\n plt.show()\\n\\n return average_test_loss, recon_sig[1:], org_sig[1:]\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Training\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"train_loss_history = []\\ntest_loss_history = []\\n\\nepochs = 500\\nstart_time = time.time()\\n\\nfor epoch in range(1, epochs + 1):\\n train_loss = train(epoch)\\n test_loss, _, _ = test(epoch)\\n save_model(test_loss, train_loss)\\n \\n train_loss_history.append(train_loss)\\n test_loss_history.append(test_loss)\\n \\nprint(\\\"--- %s seconds ---\\\" % (time.time() - start_time))\\nprint('Minimal train/testing losses are {:.4f} and {:.4f} with index {}\\\\n'\\n .format(cur_train_loss, best_val_loss, test_loss_history.index(min(test_loss_history))))\\n\\n# plot train and test loss history over epochs\\nplt.figure(1)\\nepoch_axis = range(1, len(train_loss_history) + 1)\\nplt.plot(epoch_axis, train_loss_history, 'bo')\\nplt.plot(epoch_axis, test_loss_history, 'b+')\\nplt.xlabel('Epochs')\\nplt.ylabel('Loss')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Result evaluation\",\n \"_____no_output_____\"\n ],\n [\n \"### a. Visualization of mostly used VQ vectors\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# select the best performing model\\nmodel.load_state_dict(torch.load('./spk_vq_vae_temp.pt'))\\n\\nembed_idx = np.argsort(model.embed_freq)\\nembed_sort = model.embed.weight.data.cpu().numpy()[embed_idx]\\n\\n# Visualizing activation pattern of VQ codes on testing dataset (the first 8 mostly activated)\\nplt.figure()\\nn_row, n_col = 1, 8\\nf, axarr = plt.subplots(n_row, n_col, figsize=(n_col*2, n_row*2))\\nfor i in range(8):\\n axarr[i].plot(embed_sort[i], 'r')\\n axarr[i].axis('off')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### b. Compression ratio\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# %% spike recon\\ntrain_mean, train_std = torch.from_numpy(d_mean), torch.from_numpy(d_std)\\n_, val_spks, test_spks = test(10)\\n\\n# calculate compression ratio\\nvq_freq = model.embed_freq / sum(model.embed_freq)\\nvq_freq = vq_freq[vq_freq != 0]\\nvq_log2 = np.log2(vq_freq)\\nbits = -sum(np.multiply(vq_freq, vq_log2))\\ncr = spk_ch * spk_dim * 16 / (param_resnet_v2[2] * bits)\\nprint('compression ratio is {:.2f} with {:.2f}-bit.'.format(cr, bits))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### c. MSE error\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"recon_spks = val_spks * train_std + train_mean\\ntest_spks_v2 = test_spks * train_std + train_mean\\n\\nrecon_spks = recon_spks.view(-1, spk_dim)\\ntest_spks_v2 = test_spks_v2.view(-1, spk_dim)\\n\\nrecon_err = torch.norm(recon_spks-test_spks_v2, p=2, dim=1) / torch.norm(test_spks_v2, p=2, dim=1)\\n\\nprint('mean of recon_err is {:.4f}'.format(torch.mean(recon_err)))\\nprint('std of recon_err is {:.4f}'.format(torch.std(recon_err)))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### d. SNDR of reconstructed spikes\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"recon_spks_new = recon_spks.numpy()\\ntest_spks_new = test_spks_v2.numpy()\\n\\ndef cal_sndr(org_data, recon_data):\\n org_norm = np.linalg.norm(org_data, axis=1)\\n err_norm = np.linalg.norm(org_data-recon_data, axis=1)\\n return np.mean(20*np.log10(org_norm / err_norm)), np.std(20*np.log10(org_norm / err_norm))\\n\\ncur_sndr, sndr_std = cal_sndr(test_spks_new, recon_spks_new)\\nprint('SNDR is {:.4f} with std {:.4f}'.format(cur_sndr, sndr_std))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### e. Visualization of reconstructed spikes chosen at random\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"rand_val_idx = np.random.permutation(len(recon_spks_new))\\n\\nplt.figure()\\nn_row, n_col = 3, 8\\nspks_to_show = test_spks_new[rand_val_idx[:n_row*n_col]]\\nymax, ymin = np.amax(spks_to_show), np.amin(spks_to_show)\\nf, axarr = plt.subplots(n_row, n_col, figsize=(n_col*3, n_row*3))\\nfor i in range(n_row):\\n for j in range(n_col):\\n axarr[i, j].plot(recon_spks_new[rand_val_idx[i*n_col+j]], 'r')\\n axarr[i, j].plot(test_spks_new[rand_val_idx[i*n_col+j]], 'b')\\n axarr[i, j].set_ylim([ymin*1.1, ymax*1.1])\\n axarr[i, j].axis('off')\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code","code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1458806,"cells":{"hexsha":{"kind":"string","value":"e7e2cb7a740160f932e1b9659282f084bb868199"},"size":{"kind":"number","value":4688,"string":"4,688"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebooks/local-notebooks/Email-Parsing.ipynb"},"max_stars_repo_name":{"kind":"string","value":"DrSnowbird/tensorflow-python3-jupyter"},"max_stars_repo_head_hexsha":{"kind":"string","value":"54af80e1e401e7e8254fdd15f8d35795b6360710"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":10,"string":"10"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-12-08T02:56:41.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-09T09:25:03.000Z"},"max_issues_repo_path":{"kind":"string","value":"notebooks/local-notebooks/Email-Parsing.ipynb"},"max_issues_repo_name":{"kind":"string","value":"DrSnowbird/conda-nonroot-docker"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ee4ecc7f4060c22c69cd01ae5114094f8781afc3"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-01-07T06:27:59.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-04-04T17:06:05.000Z"},"max_forks_repo_path":{"kind":"string","value":"notebooks/local-notebooks/Email-Parsing.ipynb"},"max_forks_repo_name":{"kind":"string","value":"DrSnowbird/conda-nonroot-docker"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ee4ecc7f4060c22c69cd01ae5114094f8781afc3"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-23T04:53:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-10T06:05:13.000Z"},"avg_line_length":{"kind":"number","value":63.3513513514,"string":"63.351351"},"max_line_length":{"kind":"number","value":1202,"string":"1,202"},"alphanum_fraction":{"kind":"number","value":0.6435580205,"string":"0.643558"},"cells":{"kind":"list like","value":[[["import email\n\n#msg = email.message_from_string(myString) \n\nf = open('/data/inbox/1.', 'w')\nmsg = email.message_from_file(f)\nf.close()\n\nparser = email.parser.HeaderParser()\nheaders = parser.parsestr(msg.as_string())\n\nfor h in headers.items():\n print(h)","_____no_output_____"]]],"string":"[\n [\n [\n \"import email\\n\\n#msg = email.message_from_string(myString) \\n\\nf = open('/data/inbox/1.', 'w')\\nmsg = email.message_from_file(f)\\nf.close()\\n\\nparser = email.parser.HeaderParser()\\nheaders = parser.parsestr(msg.as_string())\\n\\nfor h in headers.items():\\n print(h)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"]],"string":"[\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1458807,"cells":{"hexsha":{"kind":"string","value":"e7e2d39e9e33a4e63d7f73beff6a5a9e266c9bf1"},"size":{"kind":"number","value":15400,"string":"15,400"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"demo/tentative/demo_centre-building.ipynb"},"max_stars_repo_name":{"kind":"string","value":"suflaj/nemesys"},"max_stars_repo_head_hexsha":{"kind":"string","value":"669fd4fd2e27d1923930643b9b2bed9e1ed600f3"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"demo/tentative/demo_centre-building.ipynb"},"max_issues_repo_name":{"kind":"string","value":"suflaj/nemesys"},"max_issues_repo_head_hexsha":{"kind":"string","value":"669fd4fd2e27d1923930643b9b2bed9e1ed600f3"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":23,"string":"23"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-21T00:46:22.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-01-04T00:11:44.000Z"},"max_forks_repo_path":{"kind":"string","value":"demo/tentative/demo_centre-building.ipynb"},"max_forks_repo_name":{"kind":"string","value":"suflaj/nemesys"},"max_forks_repo_head_hexsha":{"kind":"string","value":"669fd4fd2e27d1923930643b9b2bed9e1ed600f3"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":21.3001383126,"string":"21.300138"},"max_line_length":{"kind":"number","value":183,"string":"183"},"alphanum_fraction":{"kind":"number","value":0.5019480519,"string":"0.501948"},"cells":{"kind":"list like","value":[[["# Neural Memory System - Centre building","_____no_output_____"],["## Environment setup","_____no_output_____"]],[["import os\nfrom pathlib import Path","_____no_output_____"],["CURRENT_FOLDER = Path(os.getcwd())","_____no_output_____"],["CD_KEY = \"--CENTRE_BUILDING_DEMO_IN_ROOT\"\n\nif (\n CD_KEY not in os.environ\n or os.environ[CD_KEY] is None\n or len(os.environ[CD_KEY]) == 0\n or os.environ[CD_KEY] == \"false\"\n):\n %cd -q ../../..\n \n ROOT_FOLDER = Path(os.getcwd()).relative_to(os.getcwd())\n CURRENT_FOLDER = CURRENT_FOLDER.relative_to(ROOT_FOLDER.absolute())\n \nos.environ[CD_KEY] = \"true\"","_____no_output_____"],["print(f\"Root folder: {ROOT_FOLDER}\")\nprint(f\"Current folder: {CURRENT_FOLDER}\")","Root folder: .\nCurrent folder: nemesys/demo/tentative\n"]],[["## Modules","_____no_output_____"]],[["from itertools import product\nimport math\nimport struct\n\nimport numpy as np\nimport torch\nimport torch.nn\n\nfrom nemesys.hashing.minhashing.numpy_minhash import NumPyMinHash\nfrom nemesys.modelling.analysers.modules.pytorch_analyser_lstm import PyTorchAnalyserLSTM\nfrom nemesys.modelling.decoders.modules.pytorch_decoder_conv2d import PyTorchDecoderConv2D\nfrom nemesys.modelling.encoders.modules.pytorch_encoder_linear import PyTorchEncoderLinear\nfrom nemesys.modelling.routers.concatenation.minhash.minhash_concatenation_router import (\n MinHashConcatenationRouter\n)\nfrom nemesys.modelling.stores.pytorch_list_store import PyTorchListStore\nfrom nemesys.modelling.synthesisers.modules.pytorch_synthesiser_linear import PyTorchSynthesiserLinear","_____no_output_____"],["torch.set_printoptions(sci_mode=False)","_____no_output_____"]],[["## Components setup","_____no_output_____"],["### Sizes","_____no_output_____"]],[["EMBEDDING_SIZE = 4","_____no_output_____"],["ANALYSER_CLASS_NAMES = (\"statement\",)\nANALYSER_OUTPUT_SIZE = EMBEDDING_SIZE","_____no_output_____"],["ENCODER_OUTPUT_SIZE = 3","_____no_output_____"],["DECODER_IN_CHANNELS = 1\nDECODER_OUT_CHANNELS = 3\nDECODER_KERNEL_SIZE = (1, ENCODER_OUTPUT_SIZE)","_____no_output_____"],["MINHASH_N_PERMUTATIONS = 4\nMINHASH_SEED = 0","_____no_output_____"]],[["### Embedding setup","_____no_output_____"]],[["allowed_letters = [chr(x) for x in range(ord(\"A\"), ord(\"Z\") + 1)]\nvocabulary = [\"\".join(x) for x in product(*([allowed_letters] * 3))]\nword_to_index = {word: i for i, word in enumerate(vocabulary)}","_____no_output_____"],["embedding = torch.nn.Embedding(\n num_embeddings=len(word_to_index),\n embedding_dim=EMBEDDING_SIZE,\n max_norm=math.sqrt(EMBEDDING_SIZE),\n)","_____no_output_____"]],[["### Analyser setup","_____no_output_____"]],[["analyser = PyTorchAnalyserLSTM(\n class_names=ANALYSER_CLASS_NAMES,\n input_size=EMBEDDING_SIZE,\n hidden_size=ANALYSER_OUTPUT_SIZE,\n batch_first=True,\n)","_____no_output_____"]],[["### Encoder setup","_____no_output_____"]],[["encoder = PyTorchEncoderLinear(\n in_features=ANALYSER_OUTPUT_SIZE,\n out_features=ENCODER_OUTPUT_SIZE,\n content_key=\"content\",\n)","_____no_output_____"]],[["### Store setup","_____no_output_____"]],[["store = PyTorchListStore()","_____no_output_____"]],[["### Decoder setup","_____no_output_____"]],[["decoder = PyTorchDecoderConv2D(\n in_channels = DECODER_IN_CHANNELS,\n out_channels = DECODER_OUT_CHANNELS,\n kernel_size = DECODER_KERNEL_SIZE,\n)","_____no_output_____"]],[["### Router setup","_____no_output_____"],["#### MinHash setup","_____no_output_____"]],[["def tensor_to_numpy(x: torch.Tensor):\n x = x.reshape((x.shape[0], -1)) # Preserve batches\n x = np.array(x, dtype=np.float32)\n \n return x\n\n\ndef preprocess_function(element):\n element_as_bytes = struct.pack(\")\n"]],[["### Analyser run","_____no_output_____"]],[["analyser_output = analyser(embeddings.reshape(len(inputs), 1, -1))\n\nfor class_name in ANALYSER_CLASS_NAMES:\n print(f\"{class_name}:\")\n print(analyser_output[class_name][\"content\"])","statement:\ntensor([[ 0.0044, 0.1220, -0.0175, -0.2743],\n [-0.0601, -0.0471, -0.1271, 0.2379],\n [-0.1024, 0.0011, -0.0486, 0.1101]], grad_fn=)\n"]],[["### Encoder run","_____no_output_____"]],[["encoder_output = encoder(analyser_output[\"statement\"])\nprint(encoder_output)","{'content': tensor([[ 0.1218, -0.0339, 0.0212],\n [-0.0465, 0.0300, 0.0055],\n [-0.0192, -0.0080, 0.0010]], grad_fn=)}\n"]],[["### Store run","_____no_output_____"]],[["store.append(encoder_output[\"content\"])\nprint(store)","[tensor([[ 0.1218, -0.0339, 0.0212],\n [-0.0465, 0.0300, 0.0055],\n [-0.0192, -0.0080, 0.0010]])]\n"]],[["### Decoder run","_____no_output_____"]],[["decoder_output = decoder(store)\nprint(decoder_output)","{'content': tensor([[[[ 0.4530],\n [ 0.5088],\n [ 0.4872]],\n\n [[ 0.4030],\n [ 0.4953],\n [ 0.4768]],\n\n [[-0.1114],\n [-0.0418],\n [-0.0639]]]], grad_fn=)}\n"]],[["### Router run","_____no_output_____"]],[["router_input = decoder_output[\"content\"].squeeze(dim=0)","_____no_output_____"],["router_output = router(router_input)","/tmp/ipykernel_27340/3174470834.py:10: DeprecationWarning: The binary mode of fromstring is deprecated, as it behaves surprisingly on unicode inputs. Use frombuffer instead\n element_as_int = np.fromstring(\n"],["router_output = numpy_to_tensor(router_output)\nprint(router_output)","tensor([[0.4881, 0.8373, 0.6695, 0.0170, 0.7944, 0.6838, 0.8554, 0.2267, 0.2587,\n 0.0597, 0.3019, 0.7954],\n [0.8945, 0.7646, 0.3306, 0.5174, 0.5584, 0.1036, 0.1118, 0.1061, 0.2825,\n 0.2752, 0.6448, 0.2994],\n [0.9799, 0.3582, 0.8451, 0.7665, 0.2921, 0.9583, 0.6857, 0.8045, 0.6184,\n 0.1507, 0.1753, 0.6196]])\n"]],[["### Synthesiser run","_____no_output_____"]]],"string":"[\n [\n [\n \"# Neural Memory System - Centre building\",\n \"_____no_output_____\"\n ],\n [\n \"## Environment setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\\nfrom pathlib import Path\",\n \"_____no_output_____\"\n ],\n [\n \"CURRENT_FOLDER = Path(os.getcwd())\",\n \"_____no_output_____\"\n ],\n [\n \"CD_KEY = \\\"--CENTRE_BUILDING_DEMO_IN_ROOT\\\"\\n\\nif (\\n CD_KEY not in os.environ\\n or os.environ[CD_KEY] is None\\n or len(os.environ[CD_KEY]) == 0\\n or os.environ[CD_KEY] == \\\"false\\\"\\n):\\n %cd -q ../../..\\n \\n ROOT_FOLDER = Path(os.getcwd()).relative_to(os.getcwd())\\n CURRENT_FOLDER = CURRENT_FOLDER.relative_to(ROOT_FOLDER.absolute())\\n \\nos.environ[CD_KEY] = \\\"true\\\"\",\n \"_____no_output_____\"\n ],\n [\n \"print(f\\\"Root folder: {ROOT_FOLDER}\\\")\\nprint(f\\\"Current folder: {CURRENT_FOLDER}\\\")\",\n \"Root folder: .\\nCurrent folder: nemesys/demo/tentative\\n\"\n ]\n ],\n [\n [\n \"## Modules\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from itertools import product\\nimport math\\nimport struct\\n\\nimport numpy as np\\nimport torch\\nimport torch.nn\\n\\nfrom nemesys.hashing.minhashing.numpy_minhash import NumPyMinHash\\nfrom nemesys.modelling.analysers.modules.pytorch_analyser_lstm import PyTorchAnalyserLSTM\\nfrom nemesys.modelling.decoders.modules.pytorch_decoder_conv2d import PyTorchDecoderConv2D\\nfrom nemesys.modelling.encoders.modules.pytorch_encoder_linear import PyTorchEncoderLinear\\nfrom nemesys.modelling.routers.concatenation.minhash.minhash_concatenation_router import (\\n MinHashConcatenationRouter\\n)\\nfrom nemesys.modelling.stores.pytorch_list_store import PyTorchListStore\\nfrom nemesys.modelling.synthesisers.modules.pytorch_synthesiser_linear import PyTorchSynthesiserLinear\",\n \"_____no_output_____\"\n ],\n [\n \"torch.set_printoptions(sci_mode=False)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Components setup\",\n \"_____no_output_____\"\n ],\n [\n \"### Sizes\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"EMBEDDING_SIZE = 4\",\n \"_____no_output_____\"\n ],\n [\n \"ANALYSER_CLASS_NAMES = (\\\"statement\\\",)\\nANALYSER_OUTPUT_SIZE = EMBEDDING_SIZE\",\n \"_____no_output_____\"\n ],\n [\n \"ENCODER_OUTPUT_SIZE = 3\",\n \"_____no_output_____\"\n ],\n [\n \"DECODER_IN_CHANNELS = 1\\nDECODER_OUT_CHANNELS = 3\\nDECODER_KERNEL_SIZE = (1, ENCODER_OUTPUT_SIZE)\",\n \"_____no_output_____\"\n ],\n [\n \"MINHASH_N_PERMUTATIONS = 4\\nMINHASH_SEED = 0\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Embedding setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"allowed_letters = [chr(x) for x in range(ord(\\\"A\\\"), ord(\\\"Z\\\") + 1)]\\nvocabulary = [\\\"\\\".join(x) for x in product(*([allowed_letters] * 3))]\\nword_to_index = {word: i for i, word in enumerate(vocabulary)}\",\n \"_____no_output_____\"\n ],\n [\n \"embedding = torch.nn.Embedding(\\n num_embeddings=len(word_to_index),\\n embedding_dim=EMBEDDING_SIZE,\\n max_norm=math.sqrt(EMBEDDING_SIZE),\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Analyser setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"analyser = PyTorchAnalyserLSTM(\\n class_names=ANALYSER_CLASS_NAMES,\\n input_size=EMBEDDING_SIZE,\\n hidden_size=ANALYSER_OUTPUT_SIZE,\\n batch_first=True,\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Encoder setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"encoder = PyTorchEncoderLinear(\\n in_features=ANALYSER_OUTPUT_SIZE,\\n out_features=ENCODER_OUTPUT_SIZE,\\n content_key=\\\"content\\\",\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Store setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"store = PyTorchListStore()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Decoder setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"decoder = PyTorchDecoderConv2D(\\n in_channels = DECODER_IN_CHANNELS,\\n out_channels = DECODER_OUT_CHANNELS,\\n kernel_size = DECODER_KERNEL_SIZE,\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Router setup\",\n \"_____no_output_____\"\n ],\n [\n \"#### MinHash setup\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def tensor_to_numpy(x: torch.Tensor):\\n x = x.reshape((x.shape[0], -1)) # Preserve batches\\n x = np.array(x, dtype=np.float32)\\n \\n return x\\n\\n\\ndef preprocess_function(element):\\n element_as_bytes = struct.pack(\\\")\\n\"\n ]\n ],\n [\n [\n \"### Analyser run\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"analyser_output = analyser(embeddings.reshape(len(inputs), 1, -1))\\n\\nfor class_name in ANALYSER_CLASS_NAMES:\\n print(f\\\"{class_name}:\\\")\\n print(analyser_output[class_name][\\\"content\\\"])\",\n \"statement:\\ntensor([[ 0.0044, 0.1220, -0.0175, -0.2743],\\n [-0.0601, -0.0471, -0.1271, 0.2379],\\n [-0.1024, 0.0011, -0.0486, 0.1101]], grad_fn=)\\n\"\n ]\n ],\n [\n [\n \"### Encoder run\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"encoder_output = encoder(analyser_output[\\\"statement\\\"])\\nprint(encoder_output)\",\n \"{'content': tensor([[ 0.1218, -0.0339, 0.0212],\\n [-0.0465, 0.0300, 0.0055],\\n [-0.0192, -0.0080, 0.0010]], grad_fn=)}\\n\"\n ]\n ],\n [\n [\n \"### Store run\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"store.append(encoder_output[\\\"content\\\"])\\nprint(store)\",\n \"[tensor([[ 0.1218, -0.0339, 0.0212],\\n [-0.0465, 0.0300, 0.0055],\\n [-0.0192, -0.0080, 0.0010]])]\\n\"\n ]\n ],\n [\n [\n \"### Decoder run\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"decoder_output = decoder(store)\\nprint(decoder_output)\",\n \"{'content': tensor([[[[ 0.4530],\\n [ 0.5088],\\n [ 0.4872]],\\n\\n [[ 0.4030],\\n [ 0.4953],\\n [ 0.4768]],\\n\\n [[-0.1114],\\n [-0.0418],\\n [-0.0639]]]], grad_fn=)}\\n\"\n ]\n ],\n [\n [\n \"### Router run\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"router_input = decoder_output[\\\"content\\\"].squeeze(dim=0)\",\n \"_____no_output_____\"\n ],\n [\n \"router_output = router(router_input)\",\n \"/tmp/ipykernel_27340/3174470834.py:10: DeprecationWarning: The binary mode of fromstring is deprecated, as it behaves surprisingly on unicode inputs. Use frombuffer instead\\n element_as_int = np.fromstring(\\n\"\n ],\n [\n \"router_output = numpy_to_tensor(router_output)\\nprint(router_output)\",\n \"tensor([[0.4881, 0.8373, 0.6695, 0.0170, 0.7944, 0.6838, 0.8554, 0.2267, 0.2587,\\n 0.0597, 0.3019, 0.7954],\\n [0.8945, 0.7646, 0.3306, 0.5174, 0.5584, 0.1036, 0.1118, 0.1061, 0.2825,\\n 0.2752, 0.6448, 0.2994],\\n [0.9799, 0.3582, 0.8451, 0.7665, 0.2921, 0.9583, 0.6857, 0.8045, 0.6184,\\n 0.1507, 0.1753, 0.6196]])\\n\"\n ]\n ],\n [\n [\n \"### Synthesiser run\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code","code","code","code"],["markdown"],["code","code"],["markdown","markdown"],["code","code","code","code","code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code","code"],["markdown"],["code"],["markdown","markdown","markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458808,"cells":{"hexsha":{"kind":"string","value":"e7e2dfd26469131c297b433d5b28f8aa76c056b5"},"size":{"kind":"number","value":19325,"string":"19,325"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"00_quickstart/09_Detect_Model_Bias_Clarify.ipynb"},"max_stars_repo_name":{"kind":"string","value":"MarcusFra/workshop"},"max_stars_repo_head_hexsha":{"kind":"string","value":"83f16d41f5e10f9c23242066f77a14bb61ac78d7"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":2327,"string":"2,327"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-01T09:47:34.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-25T12:38:42.000Z"},"max_issues_repo_path":{"kind":"string","value":"00_quickstart/09_Detect_Model_Bias_Clarify.ipynb"},"max_issues_repo_name":{"kind":"string","value":"MarcusFra/workshop"},"max_issues_repo_head_hexsha":{"kind":"string","value":"83f16d41f5e10f9c23242066f77a14bb61ac78d7"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":209,"string":"209"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-03-01T17:14:12.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-11-08T20:35:42.000Z"},"max_forks_repo_path":{"kind":"string","value":"00_quickstart/09_Detect_Model_Bias_Clarify.ipynb"},"max_forks_repo_name":{"kind":"string","value":"MarcusFra/workshop"},"max_forks_repo_head_hexsha":{"kind":"string","value":"83f16d41f5e10f9c23242066f77a14bb61ac78d7"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":686,"string":"686"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-03-03T17:24:51.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-11-25T23:39:12.000Z"},"avg_line_length":{"kind":"number","value":28.9730134933,"string":"28.973013"},"max_line_length":{"kind":"number","value":381,"string":"381"},"alphanum_fraction":{"kind":"number","value":0.5887192755,"string":"0.588719"},"cells":{"kind":"list like","value":[[["# Detect Model Bias with Amazon SageMaker Clarify","_____no_output_____"],["\n## Amazon Science: _[How Clarify helps machine learning developers detect unintended bias](https://www.amazon.science/latest-news/how-clarify-helps-machine-learning-developers-detect-unintended-bias)_ \n\n[](https://www.amazon.science/latest-news/how-clarify-helps-machine-learning-developers-detect-unintended-bias)","_____no_output_____"],["# Terminology\n\n* **Bias**: \nAn imbalance in the training data or the prediction behavior of the model across different groups, such as age or income bracket. Biases can result from the data or algorithm used to train your model. For instance, if an ML model is trained primarily on data from middle-aged individuals, it may be less accurate when making predictions involving younger and older people.\n\n* **Bias metric**: \nA function that returns numerical values indicating the level of a potential bias.\n\n* **Bias report**:\nA collection of bias metrics for a given dataset, or a combination of a dataset and a model.\n\n* **Label**:\nFeature that is the target for training a machine learning model. Referred to as the observed label or observed outcome.\n\n* **Positive label values**:\nLabel values that are favorable to a demographic group observed in a sample. In other words, designates a sample as having a positive result.\n\n* **Negative label values**:\nLabel values that are unfavorable to a demographic group observed in a sample. In other words, designates a sample as having a negative result.\n\n* **Facet**:\nA column or feature that contains the attributes with respect to which bias is measured.\n\n* **Facet value**:\nThe feature values of attributes that bias might favor or disfavor.","_____no_output_____"],["# Posttraining Bias Metrics\nhttps://docs.aws.amazon.com/sagemaker/latest/dg/clarify-measure-post-training-bias.html\n\n* **Difference in Positive Proportions in Predicted Labels (DPPL)**:\nMeasures the difference in the proportion of positive predictions between the favored facet a and the disfavored facet d.\n\n* **Disparate Impact (DI)**:\nMeasures the ratio of proportions of the predicted labels for the favored facet a and the disfavored facet d.\n\n* **Difference in Conditional Acceptance (DCAcc)**:\nCompares the observed labels to the labels predicted by a model and assesses whether this is the same across facets for predicted positive outcomes (acceptances).\n\n* **Difference in Conditional Rejection (DCR)**:\nCompares the observed labels to the labels predicted by a model and assesses whether this is the same across facets for negative outcomes (rejections).\n\n* **Recall Difference (RD)**:\nCompares the recall of the model for the favored and disfavored facets.\n\n* **Difference in Acceptance Rates (DAR)**:\nMeasures the difference in the ratios of the observed positive outcomes (TP) to the predicted positives (TP + FP) between the favored and disfavored facets.\n\n* **Difference in Rejection Rates (DRR)**:\nMeasures the difference in the ratios of the observed negative outcomes (TN) to the predicted negatives (TN + FN) between the disfavored and favored facets.\n\n* **Accuracy Difference (AD)**:\nMeasures the difference between the prediction accuracy for the favored and disfavored facets.\n\n* **Treatment Equality (TE)**:\nMeasures the difference in the ratio of false positives to false negatives between the favored and disfavored facets.\n\n* **Conditional Demographic Disparity in Predicted Labels (CDDPL)**:\nMeasures the disparity of predicted labels between the facets as a whole, but also by subgroups.\n\n* **Counterfactual Fliptest (FT)**:\nExamines each member of facet d and assesses whether similar members of facet a have different model predictions.\n","_____no_output_____"]],[["import boto3\nimport sagemaker\nimport pandas as pd\nimport numpy as np\n\nsess = sagemaker.Session()\nbucket = sess.default_bucket()\nregion = boto3.Session().region_name\n\nimport botocore.config\n\nconfig = botocore.config.Config(\n user_agent_extra='dsoaws/1.0'\n)\n\nsm = boto3.Session().client(service_name=\"sagemaker\", \n region_name=region,\n config=config)","_____no_output_____"],["%store -r role","_____no_output_____"],["import matplotlib.pyplot as plt\n\n%matplotlib inline\n%config InlineBackend.figure_format='retina'","_____no_output_____"]],[["# Test data for bias\n\nWe created test data in JSONLines format to match the model inputs. ","_____no_output_____"]],[["test_data_bias_path = \"./data-clarify/test_data_bias.jsonl\"","_____no_output_____"],["!head -n 1 $test_data_bias_path","_____no_output_____"]],[["### Upload the data","_____no_output_____"]],[["test_data_bias_s3_uri = sess.upload_data(bucket=bucket, key_prefix=\"bias/test_data_bias\", path=test_data_bias_path)\ntest_data_bias_s3_uri","_____no_output_____"],["!aws s3 ls $test_data_bias_s3_uri","_____no_output_____"],["%store test_data_bias_s3_uri","_____no_output_____"]],[["# Run Posttraining Model Bias Analysis","_____no_output_____"]],[["%store -r pipeline_name","_____no_output_____"],["print(pipeline_name)","_____no_output_____"],["%%time\n\nimport time\nfrom pprint import pprint\n\nexecutions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)[\"PipelineExecutionSummaries\"]\npipeline_execution_status = executions_response[0][\"PipelineExecutionStatus\"]\nprint(pipeline_execution_status)\n\nwhile pipeline_execution_status == \"Executing\":\n try:\n executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)[\"PipelineExecutionSummaries\"]\n pipeline_execution_status = executions_response[0][\"PipelineExecutionStatus\"]\n except Exception as e:\n print(\"Please wait...\")\n time.sleep(30)\n\npprint(executions_response)","_____no_output_____"]],[["# List Pipeline Execution Steps\n","_____no_output_____"]],[["pipeline_execution_status = executions_response[0][\"PipelineExecutionStatus\"]\nprint(pipeline_execution_status)","_____no_output_____"],["pipeline_execution_arn = executions_response[0][\"PipelineExecutionArn\"]\nprint(pipeline_execution_arn)","_____no_output_____"],["from pprint import pprint\n\nsteps = sm.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_execution_arn)\n\npprint(steps)","_____no_output_____"]],[["# View Created Model\n_Note: If the trained model did not pass the Evaluation step (> accuracy threshold), it will not be created._","_____no_output_____"]],[["for execution_step in steps[\"PipelineExecutionSteps\"]:\n if execution_step[\"StepName\"] == \"CreateModel\":\n model_arn = execution_step[\"Metadata\"][\"Model\"][\"Arn\"]\n break\nprint(model_arn)\n\npipeline_model_name = model_arn.split(\"/\")[-1]\nprint(pipeline_model_name)","_____no_output_____"]],[["# SageMakerClarifyProcessor","_____no_output_____"]],[["from sagemaker import clarify\n\nclarify_processor = clarify.SageMakerClarifyProcessor(\n role=role, \n instance_count=1, \n instance_type=\"ml.c5.2xlarge\", \n sagemaker_session=sess\n)","_____no_output_____"]],[["# Writing DataConfig and ModelConfig\nA `DataConfig` object communicates some basic information about data I/O to Clarify. We specify where to find the input dataset, where to store the output, the target column (`label`), the header names, and the dataset type.\n\nSimilarly, the `ModelConfig` object communicates information about your trained model and `ModelPredictedLabelConfig` provides information on the format of your predictions. \n\n**Note**: To avoid additional traffic to your production models, SageMaker Clarify sets up and tears down a dedicated endpoint when processing. `ModelConfig` specifies your preferred instance type and instance count used to run your model on during Clarify's processing.","_____no_output_____"],["## DataConfig","_____no_output_____"]],[["bias_report_prefix = \"bias/report-{}\".format(pipeline_model_name)\n\nbias_report_output_path = \"s3://{}/{}\".format(bucket, bias_report_prefix)\n\ndata_config = clarify.DataConfig(\n s3_data_input_path=test_data_bias_s3_uri,\n s3_output_path=bias_report_output_path,\n label=\"star_rating\",\n features=\"features\",\n # label must be last, features in exact order as passed into model\n headers=[\"review_body\", \"product_category\", \"star_rating\"],\n dataset_type=\"application/jsonlines\",\n)","_____no_output_____"]],[["## ModelConfig","_____no_output_____"]],[["model_config = clarify.ModelConfig(\n model_name=pipeline_model_name,\n instance_type=\"ml.m5.4xlarge\",\n instance_count=1,\n content_type=\"application/jsonlines\",\n accept_type=\"application/jsonlines\",\n # {\"features\": [\"the worst\", \"Digital_Software\"]}\n content_template='{\"features\":$features}',\n)","_____no_output_____"]],[["## _Note: `label` is set to the JSON key for the model prediction results_","_____no_output_____"]],[["predictions_config = clarify.ModelPredictedLabelConfig(label=\"predicted_label\")","_____no_output_____"]],[["## BiasConfig","_____no_output_____"]],[["bias_config = clarify.BiasConfig(\n label_values_or_threshold=[\n 5,\n 4,\n ], # needs to be int or str for continuous dtype, needs to be >1 for categorical dtype\n facet_name=\"product_category\",\n)","_____no_output_____"]],[["# Run Clarify Job","_____no_output_____"]],[["clarify_processor.run_post_training_bias(\n data_config=data_config,\n data_bias_config=bias_config,\n model_config=model_config,\n model_predicted_label_config=predictions_config,\n # methods='all', # FlipTest requires all columns to be numeric\n methods=[\"DPPL\", \"DI\", \"DCA\", \"DCR\", \"RD\", \"DAR\", \"DRR\", \"AD\", \"TE\"],\n wait=False,\n logs=False,\n)","_____no_output_____"],["run_post_training_bias_processing_job_name = clarify_processor.latest_job.job_name\nrun_post_training_bias_processing_job_name","_____no_output_____"],["from IPython.core.display import display, HTML\n\ndisplay(\n HTML(\n 'Review Processing Job'.format(\n region, run_post_training_bias_processing_job_name\n )\n )\n)","_____no_output_____"],["from IPython.core.display import display, HTML\n\ndisplay(\n HTML(\n 'Review CloudWatch Logs After About 5 Minutes'.format(\n region, run_post_training_bias_processing_job_name\n )\n )\n)","_____no_output_____"],["from IPython.core.display import display, HTML\n\ndisplay(\n HTML(\n 'Review S3 Output Data After The Processing Job Has Completed'.format(\n bucket, bias_report_prefix\n )\n )\n)","_____no_output_____"],["from pprint import pprint\n\nrunning_processor = sagemaker.processing.ProcessingJob.from_processing_name(\n processing_job_name=run_post_training_bias_processing_job_name, sagemaker_session=sess\n)\n\nprocessing_job_description = running_processor.describe()\n\npprint(processing_job_description)","_____no_output_____"],["running_processor.wait(logs=False)","_____no_output_____"]],[["# Download Report From S3","_____no_output_____"]],[["!aws s3 ls $bias_report_output_path/","_____no_output_____"],["!aws s3 cp --recursive $bias_report_output_path ./generated_bias_report/","_____no_output_____"],["from IPython.core.display import display, HTML\n\ndisplay(HTML('Review Bias Report'))","_____no_output_____"]],[["# View Bias Report in Studio\nIn Studio, you can view the results under the experiments tab.\n\n\n\nEach bias metric has detailed explanations with examples that you can explore.\n\n\n\nYou could also summarize the results in a handy table!\n\n","_____no_output_____"],["# Release Resources","_____no_output_____"]],[["%%html\n\n

Shutting down your kernel for this notebook to release resources.

\n\n \n","_____no_output_____"]]],"string":"[\n [\n [\n \"# Detect Model Bias with Amazon SageMaker Clarify\",\n \"_____no_output_____\"\n ],\n [\n \"\\n## Amazon Science: _[How Clarify helps machine learning developers detect unintended bias](https://www.amazon.science/latest-news/how-clarify-helps-machine-learning-developers-detect-unintended-bias)_ \\n\\n[](https://www.amazon.science/latest-news/how-clarify-helps-machine-learning-developers-detect-unintended-bias)\",\n \"_____no_output_____\"\n ],\n [\n \"# Terminology\\n\\n* **Bias**: \\nAn imbalance in the training data or the prediction behavior of the model across different groups, such as age or income bracket. Biases can result from the data or algorithm used to train your model. For instance, if an ML model is trained primarily on data from middle-aged individuals, it may be less accurate when making predictions involving younger and older people.\\n\\n* **Bias metric**: \\nA function that returns numerical values indicating the level of a potential bias.\\n\\n* **Bias report**:\\nA collection of bias metrics for a given dataset, or a combination of a dataset and a model.\\n\\n* **Label**:\\nFeature that is the target for training a machine learning model. Referred to as the observed label or observed outcome.\\n\\n* **Positive label values**:\\nLabel values that are favorable to a demographic group observed in a sample. In other words, designates a sample as having a positive result.\\n\\n* **Negative label values**:\\nLabel values that are unfavorable to a demographic group observed in a sample. In other words, designates a sample as having a negative result.\\n\\n* **Facet**:\\nA column or feature that contains the attributes with respect to which bias is measured.\\n\\n* **Facet value**:\\nThe feature values of attributes that bias might favor or disfavor.\",\n \"_____no_output_____\"\n ],\n [\n \"# Posttraining Bias Metrics\\nhttps://docs.aws.amazon.com/sagemaker/latest/dg/clarify-measure-post-training-bias.html\\n\\n* **Difference in Positive Proportions in Predicted Labels (DPPL)**:\\nMeasures the difference in the proportion of positive predictions between the favored facet a and the disfavored facet d.\\n\\n* **Disparate Impact (DI)**:\\nMeasures the ratio of proportions of the predicted labels for the favored facet a and the disfavored facet d.\\n\\n* **Difference in Conditional Acceptance (DCAcc)**:\\nCompares the observed labels to the labels predicted by a model and assesses whether this is the same across facets for predicted positive outcomes (acceptances).\\n\\n* **Difference in Conditional Rejection (DCR)**:\\nCompares the observed labels to the labels predicted by a model and assesses whether this is the same across facets for negative outcomes (rejections).\\n\\n* **Recall Difference (RD)**:\\nCompares the recall of the model for the favored and disfavored facets.\\n\\n* **Difference in Acceptance Rates (DAR)**:\\nMeasures the difference in the ratios of the observed positive outcomes (TP) to the predicted positives (TP + FP) between the favored and disfavored facets.\\n\\n* **Difference in Rejection Rates (DRR)**:\\nMeasures the difference in the ratios of the observed negative outcomes (TN) to the predicted negatives (TN + FN) between the disfavored and favored facets.\\n\\n* **Accuracy Difference (AD)**:\\nMeasures the difference between the prediction accuracy for the favored and disfavored facets.\\n\\n* **Treatment Equality (TE)**:\\nMeasures the difference in the ratio of false positives to false negatives between the favored and disfavored facets.\\n\\n* **Conditional Demographic Disparity in Predicted Labels (CDDPL)**:\\nMeasures the disparity of predicted labels between the facets as a whole, but also by subgroups.\\n\\n* **Counterfactual Fliptest (FT)**:\\nExamines each member of facet d and assesses whether similar members of facet a have different model predictions.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import boto3\\nimport sagemaker\\nimport pandas as pd\\nimport numpy as np\\n\\nsess = sagemaker.Session()\\nbucket = sess.default_bucket()\\nregion = boto3.Session().region_name\\n\\nimport botocore.config\\n\\nconfig = botocore.config.Config(\\n user_agent_extra='dsoaws/1.0'\\n)\\n\\nsm = boto3.Session().client(service_name=\\\"sagemaker\\\", \\n region_name=region,\\n config=config)\",\n \"_____no_output_____\"\n ],\n [\n \"%store -r role\",\n \"_____no_output_____\"\n ],\n [\n \"import matplotlib.pyplot as plt\\n\\n%matplotlib inline\\n%config InlineBackend.figure_format='retina'\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Test data for bias\\n\\nWe created test data in JSONLines format to match the model inputs. \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"test_data_bias_path = \\\"./data-clarify/test_data_bias.jsonl\\\"\",\n \"_____no_output_____\"\n ],\n [\n \"!head -n 1 $test_data_bias_path\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Upload the data\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"test_data_bias_s3_uri = sess.upload_data(bucket=bucket, key_prefix=\\\"bias/test_data_bias\\\", path=test_data_bias_path)\\ntest_data_bias_s3_uri\",\n \"_____no_output_____\"\n ],\n [\n \"!aws s3 ls $test_data_bias_s3_uri\",\n \"_____no_output_____\"\n ],\n [\n \"%store test_data_bias_s3_uri\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Run Posttraining Model Bias Analysis\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%store -r pipeline_name\",\n \"_____no_output_____\"\n ],\n [\n \"print(pipeline_name)\",\n \"_____no_output_____\"\n ],\n [\n \"%%time\\n\\nimport time\\nfrom pprint import pprint\\n\\nexecutions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)[\\\"PipelineExecutionSummaries\\\"]\\npipeline_execution_status = executions_response[0][\\\"PipelineExecutionStatus\\\"]\\nprint(pipeline_execution_status)\\n\\nwhile pipeline_execution_status == \\\"Executing\\\":\\n try:\\n executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)[\\\"PipelineExecutionSummaries\\\"]\\n pipeline_execution_status = executions_response[0][\\\"PipelineExecutionStatus\\\"]\\n except Exception as e:\\n print(\\\"Please wait...\\\")\\n time.sleep(30)\\n\\npprint(executions_response)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# List Pipeline Execution Steps\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"pipeline_execution_status = executions_response[0][\\\"PipelineExecutionStatus\\\"]\\nprint(pipeline_execution_status)\",\n \"_____no_output_____\"\n ],\n [\n \"pipeline_execution_arn = executions_response[0][\\\"PipelineExecutionArn\\\"]\\nprint(pipeline_execution_arn)\",\n \"_____no_output_____\"\n ],\n [\n \"from pprint import pprint\\n\\nsteps = sm.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_execution_arn)\\n\\npprint(steps)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# View Created Model\\n_Note: If the trained model did not pass the Evaluation step (> accuracy threshold), it will not be created._\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"for execution_step in steps[\\\"PipelineExecutionSteps\\\"]:\\n if execution_step[\\\"StepName\\\"] == \\\"CreateModel\\\":\\n model_arn = execution_step[\\\"Metadata\\\"][\\\"Model\\\"][\\\"Arn\\\"]\\n break\\nprint(model_arn)\\n\\npipeline_model_name = model_arn.split(\\\"/\\\")[-1]\\nprint(pipeline_model_name)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# SageMakerClarifyProcessor\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sagemaker import clarify\\n\\nclarify_processor = clarify.SageMakerClarifyProcessor(\\n role=role, \\n instance_count=1, \\n instance_type=\\\"ml.c5.2xlarge\\\", \\n sagemaker_session=sess\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Writing DataConfig and ModelConfig\\nA `DataConfig` object communicates some basic information about data I/O to Clarify. We specify where to find the input dataset, where to store the output, the target column (`label`), the header names, and the dataset type.\\n\\nSimilarly, the `ModelConfig` object communicates information about your trained model and `ModelPredictedLabelConfig` provides information on the format of your predictions. \\n\\n**Note**: To avoid additional traffic to your production models, SageMaker Clarify sets up and tears down a dedicated endpoint when processing. `ModelConfig` specifies your preferred instance type and instance count used to run your model on during Clarify's processing.\",\n \"_____no_output_____\"\n ],\n [\n \"## DataConfig\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"bias_report_prefix = \\\"bias/report-{}\\\".format(pipeline_model_name)\\n\\nbias_report_output_path = \\\"s3://{}/{}\\\".format(bucket, bias_report_prefix)\\n\\ndata_config = clarify.DataConfig(\\n s3_data_input_path=test_data_bias_s3_uri,\\n s3_output_path=bias_report_output_path,\\n label=\\\"star_rating\\\",\\n features=\\\"features\\\",\\n # label must be last, features in exact order as passed into model\\n headers=[\\\"review_body\\\", \\\"product_category\\\", \\\"star_rating\\\"],\\n dataset_type=\\\"application/jsonlines\\\",\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## ModelConfig\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"model_config = clarify.ModelConfig(\\n model_name=pipeline_model_name,\\n instance_type=\\\"ml.m5.4xlarge\\\",\\n instance_count=1,\\n content_type=\\\"application/jsonlines\\\",\\n accept_type=\\\"application/jsonlines\\\",\\n # {\\\"features\\\": [\\\"the worst\\\", \\\"Digital_Software\\\"]}\\n content_template='{\\\"features\\\":$features}',\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## _Note: `label` is set to the JSON key for the model prediction results_\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"predictions_config = clarify.ModelPredictedLabelConfig(label=\\\"predicted_label\\\")\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## BiasConfig\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"bias_config = clarify.BiasConfig(\\n label_values_or_threshold=[\\n 5,\\n 4,\\n ], # needs to be int or str for continuous dtype, needs to be >1 for categorical dtype\\n facet_name=\\\"product_category\\\",\\n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Run Clarify Job\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"clarify_processor.run_post_training_bias(\\n data_config=data_config,\\n data_bias_config=bias_config,\\n model_config=model_config,\\n model_predicted_label_config=predictions_config,\\n # methods='all', # FlipTest requires all columns to be numeric\\n methods=[\\\"DPPL\\\", \\\"DI\\\", \\\"DCA\\\", \\\"DCR\\\", \\\"RD\\\", \\\"DAR\\\", \\\"DRR\\\", \\\"AD\\\", \\\"TE\\\"],\\n wait=False,\\n logs=False,\\n)\",\n \"_____no_output_____\"\n ],\n [\n \"run_post_training_bias_processing_job_name = clarify_processor.latest_job.job_name\\nrun_post_training_bias_processing_job_name\",\n \"_____no_output_____\"\n ],\n [\n \"from IPython.core.display import display, HTML\\n\\ndisplay(\\n HTML(\\n 'Review Processing Job'.format(\\n region, run_post_training_bias_processing_job_name\\n )\\n )\\n)\",\n \"_____no_output_____\"\n ],\n [\n \"from IPython.core.display import display, HTML\\n\\ndisplay(\\n HTML(\\n 'Review CloudWatch Logs After About 5 Minutes'.format(\\n region, run_post_training_bias_processing_job_name\\n )\\n )\\n)\",\n \"_____no_output_____\"\n ],\n [\n \"from IPython.core.display import display, HTML\\n\\ndisplay(\\n HTML(\\n 'Review S3 Output Data After The Processing Job Has Completed'.format(\\n bucket, bias_report_prefix\\n )\\n )\\n)\",\n \"_____no_output_____\"\n ],\n [\n \"from pprint import pprint\\n\\nrunning_processor = sagemaker.processing.ProcessingJob.from_processing_name(\\n processing_job_name=run_post_training_bias_processing_job_name, sagemaker_session=sess\\n)\\n\\nprocessing_job_description = running_processor.describe()\\n\\npprint(processing_job_description)\",\n \"_____no_output_____\"\n ],\n [\n \"running_processor.wait(logs=False)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Download Report From S3\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!aws s3 ls $bias_report_output_path/\",\n \"_____no_output_____\"\n ],\n [\n \"!aws s3 cp --recursive $bias_report_output_path ./generated_bias_report/\",\n \"_____no_output_____\"\n ],\n [\n \"from IPython.core.display import display, HTML\\n\\ndisplay(HTML('Review Bias Report'))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# View Bias Report in Studio\\nIn Studio, you can view the results under the experiments tab.\\n\\n\\n\\nEach bias metric has detailed explanations with examples that you can explore.\\n\\n\\n\\nYou could also summarize the results in a handy table!\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Release Resources\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%%html\\n\\n

Shutting down your kernel for this notebook to release resources.

\\n\\n \\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown","markdown","markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code","code","code","code","code"],["markdown"],["code","code","code"],["markdown","markdown"],["code"]],"string":"[\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1458809,"cells":{"hexsha":{"kind":"string","value":"e7e2e25c47bc9d536a1d5dcd4a5128be5a6db558"},"size":{"kind":"number","value":13607,"string":"13,607"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"coles.ipynb"},"max_stars_repo_name":{"kind":"string","value":"adambadge/coles-scraper"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d388eabfc00e6e78002a28c69b66b825684df604"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-10-27T23:49:21.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-04T22:21:57.000Z"},"max_issues_repo_path":{"kind":"string","value":"coles.ipynb"},"max_issues_repo_name":{"kind":"string","value":"adambadge/coles-scraper"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d388eabfc00e6e78002a28c69b66b825684df604"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-07-16T03:37:41.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-06-29T02:41:56.000Z"},"max_forks_repo_path":{"kind":"string","value":"coles.ipynb"},"max_forks_repo_name":{"kind":"string","value":"adambadge/coles-scraper"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d388eabfc00e6e78002a28c69b66b825684df604"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-11-29T03:45:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-04T22:22:00.000Z"},"avg_line_length":{"kind":"number","value":76.4438202247,"string":"76.44382"},"max_line_length":{"kind":"number","value":714,"string":"714"},"alphanum_fraction":{"kind":"number","value":0.593371059,"string":"0.593371"},"cells":{"kind":"list like","value":[[["import requests\n\nurl = 'https://api.coles.com.au/customer/v1/coles/products/search?limit=20&q=Drinks&start=40&storeId=7716&type=SKU'\nh = {\n'Accept-Encoding': 'gzip'\n,'Connection': 'keep-alive'\n,'Accept': '*/*' \n,'User-Agent': 'Shopmate/3.4.1 (iPhone; iOS 11.4.1; Scale/3.00)'\n,'X-Coles-API-Key': '046bc0d4-3854-481f-80dc-85f9e846503d'\n,'X-Coles-API-Secret': 'e6ab96ff-453b-45ba-a2be-ae8d7c12cadf'\n,'Accept-Language': 'en-AU;q=1'\n}\n\nr = requests.get(url, headers=h)","_____no_output_____"],["j = r.json()\n","_____no_output_____"],["results = j['Results']","_____no_output_____"],["print(len(results))","20\n"],["for x in results:\n print(x['Name'])","Mango Juice\nLemonade Gazoz Drink\nPineapple Juice Box 250mL\n100% Apple Juice Box 200mL\nGuava Nectar Fruit Drink\nApricot Nectar Fruit Drink\nCranberry Fruit Drink\nLow Sugar Cranberry Drink\nCranberry Blueberry Drink\nTropical Fruit Drink\nOrange Fruit Drink\nCranberry Fruit Drink\nApple Fruit Drink\nTropical Fruit Drink Chilled\nChilled Orange Fruit Drink\nHot Lemon Drink\nCola Soft Drink\nProbiotic Drink Blueberry\nLychee Aloe Vera Drink\nAloe Vera Drink\n"],["for value in results:\n print(value.values())","dict_values([4978690, 'Natural Spring Water 600ml', '24 pack', 'Frantelle', '/customer/v1/coles/products/images/4978690.jpg', [{'Aisle': 'GROCERY', 'Order': 9999.0, 'Description': 'Grocery', 'AisleSide': None, 'Facing': 0, 'Shelf': 0, 'LayoutId': '0259', 'LayoutName': 'DRINKS - WATER'}, {'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0259', 'LayoutName': 'DRINKS - WATER'}], None, [{'Label': 'drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'Online 3'}}]])\ndict_values([7365777, 'Classic Can Soft Drink 24 pack', '375mL', 'Coca-Cola', '/customer/v1/coles/products/images/7365777.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], None, [{'Label': 'drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'Online 3'}}]])\ndict_values([7366022, 'Pepsi Max 375mL Cans Soft Drink', '24 pack', 'Schweppes', '/customer/v1/coles/products/images/7366022.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], [{'PromotionId': 173021527, 'Type': 'Value (Excl FP)', 'Description': '1/2 Price', 'Price': 10.5, 'WasPrice': 21.0, 'SaveAmount': 10.5, 'UnitPrice': '$1.17 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 50.0}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([9391574, 'Solo Lemon 375mL Cans Soft Drink', '24 pack', 'Schweppes', '/customer/v1/coles/products/images/9391574.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], [{'PromotionId': 173021559, 'Type': 'Value (Excl FP)', 'Description': '1/2 Price', 'Price': 10.5, 'WasPrice': 21.0, 'SaveAmount': 10.5, 'UnitPrice': '$1.17 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 50.0}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([2383341, 'Original Green Energy Drink 275mL Cans', '4 pack', 'V', '/customer/v1/coles/products/images/2383341.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 165505563, 'Type': 'Value (Excl FP)', 'Description': '30% Off', 'Price': 6.0, 'WasPrice': 9.35, 'SaveAmount': 3.35, 'UnitPrice': '$5.45 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 35.83}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([2894147, 'Guarana Energy Drink Fridge Pack', '10 pack', 'V', '/customer/v1/coles/products/images/2894147.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([7388750, 'Guava Energy Drink', '500mL', 'Rockstar', '/customer/v1/coles/products/images/7388750.jpg', None, None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([8464796, 'Classic Cans Soft Drink 30 pack', '375mL', 'Coca-Cola', '/customer/v1/coles/products/images/8464796.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], [{'PromotionId': 171861648, 'Type': 'Value (Excl FP)', 'Description': '40% Off', 'Price': 18.0, 'WasPrice': 34.45, 'SaveAmount': 16.45, 'UnitPrice': '$1.60 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 47.75}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([3190570, 'Pure 4x250ml', '4 pack', 'V Energy Drink', '/customer/v1/coles/products/images/3190570.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 165505647, 'Type': 'Value (Excl FP)', 'Description': '30% Off', 'Price': 6.0, 'WasPrice': 9.35, 'SaveAmount': 3.35, 'UnitPrice': '$6.00 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 35.83}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([3190569, 'Energy Drink Deadpool Can Limited Edition', '250mL', 'V', '/customer/v1/coles/products/images/3190569.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([3190580, 'Lewis Hamilton Grape', '500mL', 'Monster Energy', '/customer/v1/coles/products/images/3190580.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([2383363, 'Energy Drink Blue 4 x 275ml Cans', '4 pack', 'V', '/customer/v1/coles/products/images/2383363.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 165505631, 'Type': 'Value (Excl FP)', 'Description': '30% Off', 'Price': 6.0, 'WasPrice': 9.35, 'SaveAmount': 3.35, 'UnitPrice': '$5.45 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 35.83}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([1492443, 'Zero 250mL Cans Energy Drink 4 Pack', '4 pack', 'Red Bull', '/customer/v1/coles/products/images/1492443.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([3192495, 'Energy Drink 473mL', '4 pack', 'Red Bull', '/customer/v1/coles/products/images/3192495.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([2785017, 'Sugar Free Original Energy Drink Can', '473mL', 'Red Bull', '/customer/v1/coles/products/images/2785017.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([2785108, 'Zero Ultra Energy Drink Cans 4 Pack', '500ml', 'Monster', '/customer/v1/coles/products/images/2785108.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 172314178, 'Type': 'Value (Excl FP)', 'Description': '40% Off', 'Price': 7.0, 'WasPrice': 11.7, 'SaveAmount': 4.7, 'UnitPrice': '$3.50 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 40.17}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([2787147, 'Ultra Can Energy Drink', '500mL', 'Monster', '/customer/v1/coles/products/images/2787147.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([1492454, 'Zero Energy Drink Can', '250mL', 'Red Bull', '/customer/v1/coles/products/images/1492454.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([3269705, 'Tortured Orchard Raspberry Lemonade Energy Drink Can', '250mL', 'V', '/customer/v1/coles/products/images/3269705.jpg', [{'Aisle': None, 'Order': 9999.0, 'Description': None, 'AisleSide': None, 'Facing': 0, 'Shelf': 0, 'LayoutId': '0608', 'LayoutName': 'FLEX - FOOTY FINALS'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\ndict_values([1046120, 'Original Chilled Can Energy Drink', '250mL', 'Mother', '/customer/v1/coles/products/images/1046120.jpg', [{'Aisle': 'SERVICE', 'Order': 9999.0, 'Description': 'Service desk', 'AisleSide': 'Front of Store', 'Facing': 0, 'Shelf': 0, 'LayoutId': '9049', 'LayoutName': 'COLD DRINK - OD290/330'}], [{'PromotionId': 170376039, 'Type': 'Every Day', 'Description': None, 'Price': 2.0, 'WasPrice': None, 'SaveAmount': None, 'UnitPrice': '$8.00 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-07-04T00:00:00', 'EndDate': '2018-10-14T23:59:59', 'SavePercent': None}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\n"]]],"string":"[\n [\n [\n \"import requests\\n\\nurl = 'https://api.coles.com.au/customer/v1/coles/products/search?limit=20&q=Drinks&start=40&storeId=7716&type=SKU'\\nh = {\\n'Accept-Encoding': 'gzip'\\n,'Connection': 'keep-alive'\\n,'Accept': '*/*' \\n,'User-Agent': 'Shopmate/3.4.1 (iPhone; iOS 11.4.1; Scale/3.00)'\\n,'X-Coles-API-Key': '046bc0d4-3854-481f-80dc-85f9e846503d'\\n,'X-Coles-API-Secret': 'e6ab96ff-453b-45ba-a2be-ae8d7c12cadf'\\n,'Accept-Language': 'en-AU;q=1'\\n}\\n\\nr = requests.get(url, headers=h)\",\n \"_____no_output_____\"\n ],\n [\n \"j = r.json()\\n\",\n \"_____no_output_____\"\n ],\n [\n \"results = j['Results']\",\n \"_____no_output_____\"\n ],\n [\n \"print(len(results))\",\n \"20\\n\"\n ],\n [\n \"for x in results:\\n print(x['Name'])\",\n \"Mango Juice\\nLemonade Gazoz Drink\\nPineapple Juice Box 250mL\\n100% Apple Juice Box 200mL\\nGuava Nectar Fruit Drink\\nApricot Nectar Fruit Drink\\nCranberry Fruit Drink\\nLow Sugar Cranberry Drink\\nCranberry Blueberry Drink\\nTropical Fruit Drink\\nOrange Fruit Drink\\nCranberry Fruit Drink\\nApple Fruit Drink\\nTropical Fruit Drink Chilled\\nChilled Orange Fruit Drink\\nHot Lemon Drink\\nCola Soft Drink\\nProbiotic Drink Blueberry\\nLychee Aloe Vera Drink\\nAloe Vera Drink\\n\"\n ],\n [\n \"for value in results:\\n print(value.values())\",\n \"dict_values([4978690, 'Natural Spring Water 600ml', '24 pack', 'Frantelle', '/customer/v1/coles/products/images/4978690.jpg', [{'Aisle': 'GROCERY', 'Order': 9999.0, 'Description': 'Grocery', 'AisleSide': None, 'Facing': 0, 'Shelf': 0, 'LayoutId': '0259', 'LayoutName': 'DRINKS - WATER'}, {'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0259', 'LayoutName': 'DRINKS - WATER'}], None, [{'Label': 'drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'Online 3'}}]])\\ndict_values([7365777, 'Classic Can Soft Drink 24 pack', '375mL', 'Coca-Cola', '/customer/v1/coles/products/images/7365777.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], None, [{'Label': 'drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'Online 3'}}]])\\ndict_values([7366022, 'Pepsi Max 375mL Cans Soft Drink', '24 pack', 'Schweppes', '/customer/v1/coles/products/images/7366022.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], [{'PromotionId': 173021527, 'Type': 'Value (Excl FP)', 'Description': '1/2 Price', 'Price': 10.5, 'WasPrice': 21.0, 'SaveAmount': 10.5, 'UnitPrice': '$1.17 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 50.0}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([9391574, 'Solo Lemon 375mL Cans Soft Drink', '24 pack', 'Schweppes', '/customer/v1/coles/products/images/9391574.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], [{'PromotionId': 173021559, 'Type': 'Value (Excl FP)', 'Description': '1/2 Price', 'Price': 10.5, 'WasPrice': 21.0, 'SaveAmount': 10.5, 'UnitPrice': '$1.17 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 50.0}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([2383341, 'Original Green Energy Drink 275mL Cans', '4 pack', 'V', '/customer/v1/coles/products/images/2383341.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 165505563, 'Type': 'Value (Excl FP)', 'Description': '30% Off', 'Price': 6.0, 'WasPrice': 9.35, 'SaveAmount': 3.35, 'UnitPrice': '$5.45 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 35.83}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([2894147, 'Guarana Energy Drink Fridge Pack', '10 pack', 'V', '/customer/v1/coles/products/images/2894147.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([7388750, 'Guava Energy Drink', '500mL', 'Rockstar', '/customer/v1/coles/products/images/7388750.jpg', None, None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([8464796, 'Classic Cans Soft Drink 30 pack', '375mL', 'Coca-Cola', '/customer/v1/coles/products/images/8464796.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Left', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0304', 'LayoutName': 'DRINKS - CANS BULK PACK'}], [{'PromotionId': 171861648, 'Type': 'Value (Excl FP)', 'Description': '40% Off', 'Price': 18.0, 'WasPrice': 34.45, 'SaveAmount': 16.45, 'UnitPrice': '$1.60 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 47.75}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([3190570, 'Pure 4x250ml', '4 pack', 'V Energy Drink', '/customer/v1/coles/products/images/3190570.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 165505647, 'Type': 'Value (Excl FP)', 'Description': '30% Off', 'Price': 6.0, 'WasPrice': 9.35, 'SaveAmount': 3.35, 'UnitPrice': '$6.00 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 35.83}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([3190569, 'Energy Drink Deadpool Can Limited Edition', '250mL', 'V', '/customer/v1/coles/products/images/3190569.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([3190580, 'Lewis Hamilton Grape', '500mL', 'Monster Energy', '/customer/v1/coles/products/images/3190580.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([2383363, 'Energy Drink Blue 4 x 275ml Cans', '4 pack', 'V', '/customer/v1/coles/products/images/2383363.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 165505631, 'Type': 'Value (Excl FP)', 'Description': '30% Off', 'Price': 6.0, 'WasPrice': 9.35, 'SaveAmount': 3.35, 'UnitPrice': '$5.45 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 35.83}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([1492443, 'Zero 250mL Cans Energy Drink 4 Pack', '4 pack', 'Red Bull', '/customer/v1/coles/products/images/1492443.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([3192495, 'Energy Drink 473mL', '4 pack', 'Red Bull', '/customer/v1/coles/products/images/3192495.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([2785017, 'Sugar Free Original Energy Drink Can', '473mL', 'Red Bull', '/customer/v1/coles/products/images/2785017.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([2785108, 'Zero Ultra Energy Drink Cans 4 Pack', '500ml', 'Monster', '/customer/v1/coles/products/images/2785108.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], [{'PromotionId': 172314178, 'Type': 'Value (Excl FP)', 'Description': '40% Off', 'Price': 7.0, 'WasPrice': 11.7, 'SaveAmount': 4.7, 'UnitPrice': '$3.50 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-08-08T00:00:00', 'EndDate': '2018-08-14T23:59:59', 'SavePercent': 40.17}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([2787147, 'Ultra Can Energy Drink', '500mL', 'Monster', '/customer/v1/coles/products/images/2787147.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([1492454, 'Zero Energy Drink Can', '250mL', 'Red Bull', '/customer/v1/coles/products/images/1492454.jpg', [{'Aisle': '6', 'Order': 6.0, 'Description': 'Aisle 6', 'AisleSide': 'Right', 'Facing': 0, 'Shelf': 0, 'LayoutId': '0256', 'LayoutName': 'DRINKS - LIFESTYLE'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([3269705, 'Tortured Orchard Raspberry Lemonade Energy Drink Can', '250mL', 'V', '/customer/v1/coles/products/images/3269705.jpg', [{'Aisle': None, 'Order': 9999.0, 'Description': None, 'AisleSide': None, 'Facing': 0, 'Shelf': 0, 'LayoutId': '0608', 'LayoutName': 'FLEX - FOOTY FINALS'}], None, [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\ndict_values([1046120, 'Original Chilled Can Energy Drink', '250mL', 'Mother', '/customer/v1/coles/products/images/1046120.jpg', [{'Aisle': 'SERVICE', 'Order': 9999.0, 'Description': 'Service desk', 'AisleSide': 'Front of Store', 'Facing': 0, 'Shelf': 0, 'LayoutId': '9049', 'LayoutName': 'COLD DRINK - OD290/330'}], [{'PromotionId': 170376039, 'Type': 'Every Day', 'Description': None, 'Price': 2.0, 'WasPrice': None, 'SaveAmount': None, 'UnitPrice': '$8.00 per 1L', 'UnitOfMeasure': 'EA', 'PriceDescription': None, 'StartDate': '2018-07-04T00:00:00', 'EndDate': '2018-10-14T23:59:59', 'SavePercent': None}], [{'Label': 'Drinks', 'Name': 'Drinks', 'TagType': {'Label': 'online-3', 'Name': 'online-3'}}]])\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458810,"cells":{"hexsha":{"kind":"string","value":"e7e2e6f5e07cc2909aa6839419bd2945080d7a7c"},"size":{"kind":"number","value":447835,"string":"447,835"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"covid_study_ver_cbc_4_sao.ipynb"},"max_stars_repo_name":{"kind":"string","value":"hikmetc/COVID-19-AI"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c5623131c851bd6d79a76039823df2c8d8bb908f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"covid_study_ver_cbc_4_sao.ipynb"},"max_issues_repo_name":{"kind":"string","value":"hikmetc/COVID-19-AI"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c5623131c851bd6d79a76039823df2c8d8bb908f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"covid_study_ver_cbc_4_sao.ipynb"},"max_forks_repo_name":{"kind":"string","value":"hikmetc/COVID-19-AI"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c5623131c851bd6d79a76039823df2c8d8bb908f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":140.3431526167,"string":"140.343153"},"max_line_length":{"kind":"number","value":114140,"string":"114,140"},"alphanum_fraction":{"kind":"number","value":0.8334989449,"string":"0.833499"},"cells":{"kind":"list like","value":[[["# Covid 19 Prediction Study - CBC","_____no_output_____"],["### Importing libraries","_____no_output_____"]],[["import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n","_____no_output_____"]],[["## Baskent Data","_____no_output_____"]],[["# başkent university data\nveriler = pd.read_excel(r'covid data 05.xlsx')","_____no_output_____"],["# başkent uni data\nprint('total number of pcr results: ',len(veriler['pcr']))\nprint('number of positive pcr results: ',len(veriler[veriler['pcr']=='positive']))\nprint('number of negative pcr results: ',len(veriler[veriler['pcr']=='negative']))","total number of pcr results: 1391\nnumber of positive pcr results: 707\nnumber of negative pcr results: 684\n"]],[["## Sao Paulo dataset","_____no_output_____"]],[["veri_saopaulo = pd.read_excel(r'sao_dataset.xlsx' )","_____no_output_____"],["\nprint('total number of pcr results: ',len(veri_saopaulo['SARS-Cov-2 exam result']))\nprint('number of positive pcr results: ',len(veri_saopaulo[veri_saopaulo['SARS-Cov-2 exam result']=='positive']))\nprint('number of negative pcr results: ',len(veri_saopaulo[veri_saopaulo['SARS-Cov-2 exam result']=='negative']))","total number of pcr results: 5644\nnumber of positive pcr results: 558\nnumber of negative pcr results: 5086\n"],["veri_saopaulo_l = list(veri_saopaulo.columns)\nveri_saopaulo_l","_____no_output_____"],["veri_saopaulo_l2 = ['Hematocrit', 'Hemoglobin', 'Platelets', 'Mean platelet volume ', \n'Red blood Cells', 'Lymphocytes', 'Mean corpuscular hemoglobin concentration\\xa0(MCHC)',\n 'Leukocytes', 'Basophils', 'Mean corpuscular hemoglobin (MCH)', 'Eosinophils',\n 'Mean corpuscular volume (MCV)', 'Monocytes','Red blood cell distribution width (RDW)']","_____no_output_____"],["len(veri_saopaulo_l2)","_____no_output_____"],["veriler_sao_cbc = veri_saopaulo[['Hemoglobin','Hematocrit', 'Lymphocytes', 'Leukocytes'\n ,'Mean corpuscular hemoglobin (MCH)','Mean corpuscular hemoglobin concentration (MCHC)'\n ,'Mean corpuscular volume (MCV)','Monocytes','Neutrophils','Basophils','Eosinophils'\n ,'Red blood Cells','Red blood cell distribution width (RDW)','Platelets','SARS-Cov-2 exam result']]\nveriler_sao_cbc = veriler_sao_cbc.dropna(axis=0)\nveriler_sao_cbc.describe()","_____no_output_____"],["# PCR result to integer (0: negative, 1: positive)\n\nfrom sklearn.preprocessing import LabelEncoder\n\nle = LabelEncoder()\nveriler_sao_cbc[\"PCR_result\"] = le.fit_transform(veriler_sao_cbc[\"SARS-Cov-2 exam result\"])\nveriler_sao_cbc.head()","_____no_output_____"],["# Sao Paulo Data\nprint('total number of pcr results: ',len(veriler_sao_cbc['SARS-Cov-2 exam result']))\nprint('number of positive pcr results: ',len(veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='positive']))\nprint('number of negative pcr results: ',len(veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='negative']))","total number of pcr results: 513\nnumber of positive pcr results: 75\nnumber of negative pcr results: 438\n"],["# select random 75 rows to reach balanced data\n\nsaopaulo_negative = veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='negative']\nsaopaulo_negative75 = saopaulo_negative.sample(n = 75)","_____no_output_____"],["saopaulo_negative75","_____no_output_____"],["saopaulo_positive75 = veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='positive']","_____no_output_____"],["saopaulo_positive75","_____no_output_____"],["#concatinating positive and negative datasets\n\nsaopaulo_last = [saopaulo_positive75,saopaulo_negative75]\n\nsaopaulo_lastdf = pd.concat(saopaulo_last)","_____no_output_____"],["saopaulo_lastdf","_____no_output_____"],["Xs = saopaulo_lastdf[['Hemoglobin','Hematocrit', 'Lymphocytes', 'Leukocytes'\n ,'Mean corpuscular hemoglobin (MCH)','Mean corpuscular hemoglobin concentration (MCHC)'\n ,'Mean corpuscular volume (MCV)','Monocytes','Neutrophils','Basophils','Eosinophils'\n ,'Red blood Cells','Red blood cell distribution width (RDW)','Platelets']].values\n\nYs = saopaulo_lastdf['PCR_result'].values","_____no_output_____"]],[["### Baskent Data features (demographic data)","_____no_output_____"]],[["# Exporting demographical data to excel\n\nveriler.describe().to_excel(r'/Users/hikmetcancubukcu/Desktop/covidai/veriler başkent covid/covid cbc demographic2.xlsx')","_____no_output_____"],["veriler.info()","\nRangeIndex: 1391 entries, 0 to 1390\nData columns (total 24 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 hastano 1391 non-null int64 \n 1 yasiondalik 1391 non-null float64\n 2 cinsiyet 1391 non-null object \n 3 alanin_aminotransferaz 1391 non-null int64 \n 4 aspartat_aminotransferaz 1391 non-null int64 \n 5 basophils 1391 non-null float64\n 6 c_reactive_protein 1391 non-null float64\n 7 eosinophils 1391 non-null float64\n 8 hb 1391 non-null float64\n 9 hct 1391 non-null float64\n 10 kreatinin 1391 non-null float64\n 11 laktat_dehidrogenaz 1391 non-null int64 \n 12 lenfosit 1391 non-null float64\n 13 lokosit 1391 non-null float64\n 14 mch 1391 non-null float64\n 15 mchc 1391 non-null float64\n 16 mcv 1391 non-null float64\n 17 monocytes 1391 non-null float64\n 18 notrofil 1391 non-null float64\n 19 rbc 1391 non-null float64\n 20 rdw 1391 non-null float64\n 21 total_bilirubin 1391 non-null float64\n 22 trombosit 1391 non-null float64\n 23 pcr 1391 non-null object \ndtypes: float64(18), int64(4), object(2)\nmemory usage: 260.9+ KB\n"]],[["### Baskent Data preprocessing","_____no_output_____"]],[["# Gender to integer (0 : E, 1 : K)\n\nfrom sklearn.preprocessing import LabelEncoder\n\nle = LabelEncoder()\nveriler[\"gender\"] = le.fit_transform(veriler[\"cinsiyet\"])\n\n","_____no_output_____"],["# Pcr to numeric values (negative : 0 , positive : 1)\n\nveriler[\"pcr_result\"] = le.fit_transform(veriler[\"pcr\"])\n\n","_____no_output_____"],["veriler.info() # başkent uni data","\nRangeIndex: 1391 entries, 0 to 1390\nData columns (total 26 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 hastano 1391 non-null int64 \n 1 yasiondalik 1391 non-null float64\n 2 cinsiyet 1391 non-null object \n 3 alanin_aminotransferaz 1391 non-null int64 \n 4 aspartat_aminotransferaz 1391 non-null int64 \n 5 basophils 1391 non-null float64\n 6 c_reactive_protein 1391 non-null float64\n 7 eosinophils 1391 non-null float64\n 8 hb 1391 non-null float64\n 9 hct 1391 non-null float64\n 10 kreatinin 1391 non-null float64\n 11 laktat_dehidrogenaz 1391 non-null int64 \n 12 lenfosit 1391 non-null float64\n 13 lokosit 1391 non-null float64\n 14 mch 1391 non-null float64\n 15 mchc 1391 non-null float64\n 16 mcv 1391 non-null float64\n 17 monocytes 1391 non-null float64\n 18 notrofil 1391 non-null float64\n 19 rbc 1391 non-null float64\n 20 rdw 1391 non-null float64\n 21 total_bilirubin 1391 non-null float64\n 22 trombosit 1391 non-null float64\n 23 pcr 1391 non-null object \n 24 gender 1391 non-null int64 \n 25 pcr_result 1391 non-null int64 \ndtypes: float64(18), int64(6), object(2)\nmemory usage: 282.7+ KB\n"],["# Dependent & Independent variables (cbc)\n\nX = veriler[['hb','hct','lenfosit','lokosit','mch','mchc','mcv','monocytes','notrofil',\n 'basophils','eosinophils', 'rbc','rdw','trombosit']].values\nY = veriler['pcr_result'].values","_____no_output_____"],["# Train - Test Spilt (80% - 20%)\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test,y_train,y_test = train_test_split(X,Y,stratify=Y,test_size=0.20, random_state=0)\n","_____no_output_____"],["print('n of test set', len(y_test))\nprint('n of train set', len(y_train))","n of test set 279\nn of train set 1112\n"],["# Standardization\n\nfrom sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nX_train = sc.fit_transform(x_train)\nX_test = sc.fit_transform(x_test)","_____no_output_____"],["#confusion matrix function\n\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport itertools\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n","_____no_output_____"]],[["### Logistic Regression","_____no_output_____"]],[["# importing library\n\nfrom sklearn.linear_model import LogisticRegression","_____no_output_____"],["logr= LogisticRegression(random_state=0)","_____no_output_____"],["logr.fit(X_train,y_train)","_____no_output_____"],["y_hat= logr.predict(X_test)\nyhat_logr = logr.predict_proba(X_test)\ny_hat22 = y_hat","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_hat, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[116 26]\n [ 30 107]]\n"],["print (classification_report(y_test, y_hat))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.80 0.78 0.79 137\n 1 0.79 0.82 0.81 142\n\n accuracy 0.80 279\n macro avg 0.80 0.80 0.80 279\nweighted avg 0.80 0.80 0.80 279\n\nprecision : positive predictive value\nrecall : sensitivity\n"],["# 10 fold cross validation\n\nfrom sklearn.model_selection import cross_val_score\n''' \n1. estimator : classifier (bizim durum)\n2. X\n3. Y\n4. cv : kaç katlamalı\n\n'''\nbasari = cross_val_score(estimator = logr, X=X_train, y=y_train , cv = 10)\nprint(basari.mean())\nprint(basari.std())","0.7967744530244529\n0.020836508667640717\n"],["# sao paulo external validation - logistic regression","_____no_output_____"],["y_hats= logr.predict(Xs)\n\nyhats_logr = logr.predict_proba(Xs)\ny_hats22 = y_hats","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(Ys, y_hats22, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[67 8]\n [31 44]]\n"],["print (classification_report(Ys, y_hats))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.85 0.59 0.69 75\n 1 0.68 0.89 0.77 75\n\n accuracy 0.74 150\n macro avg 0.76 0.74 0.73 150\nweighted avg 0.76 0.74 0.73 150\n\nprecision : positive predictive value\nrecall : sensitivity\n"]],[["### Support Vector Machines","_____no_output_____"]],[["from sklearn.svm import SVC\nsvc= SVC(kernel=\"rbf\",probability=True)","_____no_output_____"],["svc.fit(X_train, y_train)\nyhat= svc.predict(X_test)\nyhat_svm = svc.predict_proba(X_test)\nyhat4 = yhat # svm prediction => yhat4","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat4, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[116 26]\n [ 26 111]]\n"],["print (classification_report(y_test, yhat4))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.81 0.81 0.81 137\n 1 0.82 0.82 0.82 142\n\n accuracy 0.81 279\n macro avg 0.81 0.81 0.81 279\nweighted avg 0.81 0.81 0.81 279\n\nprecision : positive predictive value\nrecall : sensitivity\n"],["# 10 fold cross validation\n\nfrom sklearn.model_selection import cross_val_score\n''' \n1. estimator : classifier (bizim durum)\n2. X\n3. Y\n4. cv : kaç katlamalı\n\n'''\nbasari = cross_val_score(estimator = svc, X=X_train, y=y_train , cv = 10)\nprint(basari.mean())\nprint(basari.std())","0.8084298584298585\n0.029357583533502745\n"],["# SAO PAULO EXTERNAL VALIDATION\n\ny_hats4= svc.predict(Xs)\n\nyhats2_svc = svc.predict_proba(Xs)\n\n","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(Ys, y_hats4, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[70 5]\n [25 50]]\n"],["print (classification_report(Ys, y_hats4))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.91 0.67 0.77 75\n 1 0.74 0.93 0.82 75\n\n accuracy 0.80 150\n macro avg 0.82 0.80 0.80 150\nweighted avg 0.82 0.80 0.80 150\n\nprecision : positive predictive value\nrecall : sensitivity\n"]],[["### RANDOM FOREST CLASSIFIER","_____no_output_____"]],[["from sklearn.ensemble import RandomForestClassifier\nrfc= RandomForestClassifier(n_estimators=200,criterion=\"entropy\")","_____no_output_____"],["rfc.fit(X_train,y_train)\nyhat7= rfc.predict(X_test)\nyhat_rf = rfc.predict_proba(X_test)\n","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat7, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[114 28]\n [ 20 117]]\n"],["print (classification_report(y_test, yhat7))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.81 0.85 0.83 137\n 1 0.85 0.80 0.83 142\n\n accuracy 0.83 279\n macro avg 0.83 0.83 0.83 279\nweighted avg 0.83 0.83 0.83 279\n\nprecision : positive predictive value\nrecall : sensitivity\n"],["# 10 fold cross validation\n\nfrom sklearn.model_selection import cross_val_score\n''' \n1. estimator : classifier (bizim durum)\n2. X\n3. Y\n4. cv : kaç katlamalı\n\n'''\nbasari = cross_val_score(estimator = rfc, X=X_train, y=y_train , cv = 10)\nprint(basari.mean())\nprint(basari.std())","0.827324646074646\n0.03390914967191843\n"],["# SAO PAULO EXTERNAL VALIDATION\n\nyhats7= rfc.predict(Xs)\n\nyhats7_rfc = rfc.predict_proba(Xs)\n","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(Ys, yhats7, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[68 7]\n [24 51]]\n"],["print (classification_report(Ys, yhats7))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.88 0.68 0.77 75\n 1 0.74 0.91 0.81 75\n\n accuracy 0.79 150\n macro avg 0.81 0.79 0.79 150\nweighted avg 0.81 0.79 0.79 150\n\nprecision : positive predictive value\nrecall : sensitivity\n"]],[["### XGBOOST","_____no_output_____"]],[["from sklearn.ensemble import GradientBoostingClassifier\nclassifier = GradientBoostingClassifier()","_____no_output_____"],["classifier.fit(X_train, y_train)\nyhat8 = classifier.predict(X_test)\nyhat_xgboost = classifier.predict_proba(X_test)\n","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat8, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[109 33]\n [ 22 115]]\n"],["print (classification_report(y_test, yhat8))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.78 0.84 0.81 137\n 1 0.83 0.77 0.80 142\n\n accuracy 0.80 279\n macro avg 0.80 0.80 0.80 279\nweighted avg 0.81 0.80 0.80 279\n\nprecision : positive predictive value\nrecall : sensitivity\n"],["# 10 fold cross validation\n\nfrom sklearn.model_selection import cross_val_score\n''' \n1. estimator : classifier (bizim durum)\n2. X\n3. Y\n4. cv : kaç katlamalı\n\n'''\nbasari = cross_val_score(estimator = rfc, X=X_train, y=y_train , cv = 10)\nprint(basari.mean())\nprint(basari.std())","0.8363014800514801\n0.0310012414545756\n"],["# SAO PAULO EXTERNAL VALIDATION\n\ny_hats8= classifier.predict(Xs)\ny_hats_xgboost = classifier.predict_proba(Xs)\n","_____no_output_____"],["# Compute confusion matrix\ncnf_matrix = confusion_matrix(Ys, y_hats8, labels=[1,0])\nnp.set_printoptions(precision=2)\n\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')","Confusion matrix, without normalization\n[[64 11]\n [28 47]]\n"],["print (classification_report(Ys, y_hats8))\n\nprint('precision : positive predictive value')\nprint('recall : sensitivity')"," precision recall f1-score support\n\n 0 0.81 0.63 0.71 75\n 1 0.70 0.85 0.77 75\n\n accuracy 0.74 150\n macro avg 0.75 0.74 0.74 150\nweighted avg 0.75 0.74 0.74 150\n\nprecision : positive predictive value\nrecall : sensitivity\n"]],[["## ROC & AUC","_____no_output_____"]],[["#baskent dataset\n\nfrom sklearn.metrics import roc_curve, auc\n\n\n\nlogr_fpr, logr_tpr, threshold = roc_curve(y_test, yhat_logr[:,1]) # logr roc data\nauc_logr = auc(logr_fpr, logr_tpr)\n\n\n\nsvm_fpr, svm_tpr, threshold = roc_curve(y_test, yhat_svm[:,1]) # svm roc data\nauc_svm = auc(svm_fpr, svm_tpr)\n\n\n\nrf_fpr, rf_tpr, threshold = roc_curve(y_test, yhat_rf[:,1]) # rf roc data\nauc_rf = auc(rf_fpr, rf_tpr)\n\nxgboost_fpr, xgboost_tpr, threshold = roc_curve(y_test, yhat_xgboost[:,1]) # xgboost roc data\nauc_xgboost = auc(xgboost_fpr, xgboost_tpr)\n\n\n\n\nplt.figure(figsize=(4, 4), dpi=300)\n\n\nplt.plot(rf_fpr, rf_tpr, linestyle='-', label='Random Forest (AUC = %0.3f)' % auc_rf)\nplt.plot(logr_fpr, logr_tpr, linestyle='-', label='Logistic (AUC = %0.3f)' % auc_logr)\nplt.plot(svm_fpr, svm_tpr, linestyle='-', label='SVM (AUC = %0.3f)' % auc_svm)\nplt.plot(xgboost_fpr, xgboost_tpr, linestyle='-', label='XGBoost (AUC = %0.3f)' % auc_xgboost)\n\n\n\n\n\n\n\n\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\n\nplt.legend(fontsize=8)\n\nplt.show()\n\n","_____no_output_____"],["# sao paulo dataset\n\nfrom sklearn.metrics import roc_curve, auc\n\n\n\nlogr_fpr, logr_tpr, threshold = roc_curve(Ys, yhats_logr[:,1]) # logr roc data\nauc_logr = auc(logr_fpr, logr_tpr)\n\n\n\nsvm_fpr, svm_tpr, threshold = roc_curve(Ys, yhats2_svc[:,1]) # svm roc data\nauc_svm = auc(svm_fpr, svm_tpr)\n\n\n\nrf_fpr, rf_tpr, threshold = roc_curve(Ys, yhats7_rfc[:,1]) # rf roc data\nauc_rf = auc(rf_fpr, rf_tpr)\n\nxgboost_fpr, xgboost_tpr, threshold = roc_curve(Ys, y_hats_xgboost[:,1]) # xgboost roc data\nauc_xgboost = auc(xgboost_fpr, xgboost_tpr)\n\n\n\n\nplt.figure(figsize=(4, 4), dpi=300)\n\nplt.plot(xgboost_fpr, xgboost_tpr, linestyle='-', label='XGBoost (AUC = %0.3f)' % auc_xgboost)\nplt.plot(rf_fpr, rf_tpr, linestyle='-', label='Random Forest (AUC = %0.3f)' % auc_rf)\nplt.plot(svm_fpr, svm_tpr, linestyle='-', label='SVM (AUC = %0.3f)' % auc_svm)\nplt.plot(logr_fpr, logr_tpr, linestyle='-', label='Logistic (AUC = %0.3f)' % auc_logr)\n\n\n\n\n\n\n\n\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\n\nplt.legend(fontsize=8)\n\nplt.show()\n\n","_____no_output_____"],["\nyhat22= y_hat22*1\n#yhat22 = [item for sublist in yhat22 for item in sublist]\n\nyhat33= yhat4*1\nyhat44= yhat7*1\nyhat55= yhat8*1\n\nroc_data_array = [yhat55,yhat22,yhat33,yhat44,y_test]\n\nroc_data = pd.DataFrame(data=roc_data_array)","_____no_output_____"],["roc_data.transpose().to_excel(r'roc_covid_cbc_last.xlsx')","_____no_output_____"],["# validation data cbc\n\n\nyhat222= y_hats22*1\n#yhat22 = [item for sublist in yhat22 for item in sublist]\n\nyhat333= y_hats4*1\nyhat444= yhats7*1\nyhat555= y_hats8*1\n\nroc_data_array = [yhat555,yhat222,yhat333,yhat444,Ys]\n\nroc_data = pd.DataFrame(data=roc_data_array)\nroc_data.transpose().to_excel(r'roc_covid_cbc_last_val.xlsx')","_____no_output_____"]]],"string":"[\n [\n [\n \"# Covid 19 Prediction Study - CBC\",\n \"_____no_output_____\"\n ],\n [\n \"### Importing libraries\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\nimport pandas as pd\\nimport matplotlib.pyplot as plt\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Baskent Data\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# başkent university data\\nveriler = pd.read_excel(r'covid data 05.xlsx')\",\n \"_____no_output_____\"\n ],\n [\n \"# başkent uni data\\nprint('total number of pcr results: ',len(veriler['pcr']))\\nprint('number of positive pcr results: ',len(veriler[veriler['pcr']=='positive']))\\nprint('number of negative pcr results: ',len(veriler[veriler['pcr']=='negative']))\",\n \"total number of pcr results: 1391\\nnumber of positive pcr results: 707\\nnumber of negative pcr results: 684\\n\"\n ]\n ],\n [\n [\n \"## Sao Paulo dataset\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"veri_saopaulo = pd.read_excel(r'sao_dataset.xlsx' )\",\n \"_____no_output_____\"\n ],\n [\n \"\\nprint('total number of pcr results: ',len(veri_saopaulo['SARS-Cov-2 exam result']))\\nprint('number of positive pcr results: ',len(veri_saopaulo[veri_saopaulo['SARS-Cov-2 exam result']=='positive']))\\nprint('number of negative pcr results: ',len(veri_saopaulo[veri_saopaulo['SARS-Cov-2 exam result']=='negative']))\",\n \"total number of pcr results: 5644\\nnumber of positive pcr results: 558\\nnumber of negative pcr results: 5086\\n\"\n ],\n [\n \"veri_saopaulo_l = list(veri_saopaulo.columns)\\nveri_saopaulo_l\",\n \"_____no_output_____\"\n ],\n [\n \"veri_saopaulo_l2 = ['Hematocrit', 'Hemoglobin', 'Platelets', 'Mean platelet volume ', \\n'Red blood Cells', 'Lymphocytes', 'Mean corpuscular hemoglobin concentration\\\\xa0(MCHC)',\\n 'Leukocytes', 'Basophils', 'Mean corpuscular hemoglobin (MCH)', 'Eosinophils',\\n 'Mean corpuscular volume (MCV)', 'Monocytes','Red blood cell distribution width (RDW)']\",\n \"_____no_output_____\"\n ],\n [\n \"len(veri_saopaulo_l2)\",\n \"_____no_output_____\"\n ],\n [\n \"veriler_sao_cbc = veri_saopaulo[['Hemoglobin','Hematocrit', 'Lymphocytes', 'Leukocytes'\\n ,'Mean corpuscular hemoglobin (MCH)','Mean corpuscular hemoglobin concentration (MCHC)'\\n ,'Mean corpuscular volume (MCV)','Monocytes','Neutrophils','Basophils','Eosinophils'\\n ,'Red blood Cells','Red blood cell distribution width (RDW)','Platelets','SARS-Cov-2 exam result']]\\nveriler_sao_cbc = veriler_sao_cbc.dropna(axis=0)\\nveriler_sao_cbc.describe()\",\n \"_____no_output_____\"\n ],\n [\n \"# PCR result to integer (0: negative, 1: positive)\\n\\nfrom sklearn.preprocessing import LabelEncoder\\n\\nle = LabelEncoder()\\nveriler_sao_cbc[\\\"PCR_result\\\"] = le.fit_transform(veriler_sao_cbc[\\\"SARS-Cov-2 exam result\\\"])\\nveriler_sao_cbc.head()\",\n \"_____no_output_____\"\n ],\n [\n \"# Sao Paulo Data\\nprint('total number of pcr results: ',len(veriler_sao_cbc['SARS-Cov-2 exam result']))\\nprint('number of positive pcr results: ',len(veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='positive']))\\nprint('number of negative pcr results: ',len(veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='negative']))\",\n \"total number of pcr results: 513\\nnumber of positive pcr results: 75\\nnumber of negative pcr results: 438\\n\"\n ],\n [\n \"# select random 75 rows to reach balanced data\\n\\nsaopaulo_negative = veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='negative']\\nsaopaulo_negative75 = saopaulo_negative.sample(n = 75)\",\n \"_____no_output_____\"\n ],\n [\n \"saopaulo_negative75\",\n \"_____no_output_____\"\n ],\n [\n \"saopaulo_positive75 = veriler_sao_cbc[veriler_sao_cbc['SARS-Cov-2 exam result']=='positive']\",\n \"_____no_output_____\"\n ],\n [\n \"saopaulo_positive75\",\n \"_____no_output_____\"\n ],\n [\n \"#concatinating positive and negative datasets\\n\\nsaopaulo_last = [saopaulo_positive75,saopaulo_negative75]\\n\\nsaopaulo_lastdf = pd.concat(saopaulo_last)\",\n \"_____no_output_____\"\n ],\n [\n \"saopaulo_lastdf\",\n \"_____no_output_____\"\n ],\n [\n \"Xs = saopaulo_lastdf[['Hemoglobin','Hematocrit', 'Lymphocytes', 'Leukocytes'\\n ,'Mean corpuscular hemoglobin (MCH)','Mean corpuscular hemoglobin concentration (MCHC)'\\n ,'Mean corpuscular volume (MCV)','Monocytes','Neutrophils','Basophils','Eosinophils'\\n ,'Red blood Cells','Red blood cell distribution width (RDW)','Platelets']].values\\n\\nYs = saopaulo_lastdf['PCR_result'].values\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Baskent Data features (demographic data)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Exporting demographical data to excel\\n\\nveriler.describe().to_excel(r'/Users/hikmetcancubukcu/Desktop/covidai/veriler başkent covid/covid cbc demographic2.xlsx')\",\n \"_____no_output_____\"\n ],\n [\n \"veriler.info()\",\n \"\\nRangeIndex: 1391 entries, 0 to 1390\\nData columns (total 24 columns):\\n # Column Non-Null Count Dtype \\n--- ------ -------------- ----- \\n 0 hastano 1391 non-null int64 \\n 1 yasiondalik 1391 non-null float64\\n 2 cinsiyet 1391 non-null object \\n 3 alanin_aminotransferaz 1391 non-null int64 \\n 4 aspartat_aminotransferaz 1391 non-null int64 \\n 5 basophils 1391 non-null float64\\n 6 c_reactive_protein 1391 non-null float64\\n 7 eosinophils 1391 non-null float64\\n 8 hb 1391 non-null float64\\n 9 hct 1391 non-null float64\\n 10 kreatinin 1391 non-null float64\\n 11 laktat_dehidrogenaz 1391 non-null int64 \\n 12 lenfosit 1391 non-null float64\\n 13 lokosit 1391 non-null float64\\n 14 mch 1391 non-null float64\\n 15 mchc 1391 non-null float64\\n 16 mcv 1391 non-null float64\\n 17 monocytes 1391 non-null float64\\n 18 notrofil 1391 non-null float64\\n 19 rbc 1391 non-null float64\\n 20 rdw 1391 non-null float64\\n 21 total_bilirubin 1391 non-null float64\\n 22 trombosit 1391 non-null float64\\n 23 pcr 1391 non-null object \\ndtypes: float64(18), int64(4), object(2)\\nmemory usage: 260.9+ KB\\n\"\n ]\n ],\n [\n [\n \"### Baskent Data preprocessing\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Gender to integer (0 : E, 1 : K)\\n\\nfrom sklearn.preprocessing import LabelEncoder\\n\\nle = LabelEncoder()\\nveriler[\\\"gender\\\"] = le.fit_transform(veriler[\\\"cinsiyet\\\"])\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Pcr to numeric values (negative : 0 , positive : 1)\\n\\nveriler[\\\"pcr_result\\\"] = le.fit_transform(veriler[\\\"pcr\\\"])\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"veriler.info() # başkent uni data\",\n \"\\nRangeIndex: 1391 entries, 0 to 1390\\nData columns (total 26 columns):\\n # Column Non-Null Count Dtype \\n--- ------ -------------- ----- \\n 0 hastano 1391 non-null int64 \\n 1 yasiondalik 1391 non-null float64\\n 2 cinsiyet 1391 non-null object \\n 3 alanin_aminotransferaz 1391 non-null int64 \\n 4 aspartat_aminotransferaz 1391 non-null int64 \\n 5 basophils 1391 non-null float64\\n 6 c_reactive_protein 1391 non-null float64\\n 7 eosinophils 1391 non-null float64\\n 8 hb 1391 non-null float64\\n 9 hct 1391 non-null float64\\n 10 kreatinin 1391 non-null float64\\n 11 laktat_dehidrogenaz 1391 non-null int64 \\n 12 lenfosit 1391 non-null float64\\n 13 lokosit 1391 non-null float64\\n 14 mch 1391 non-null float64\\n 15 mchc 1391 non-null float64\\n 16 mcv 1391 non-null float64\\n 17 monocytes 1391 non-null float64\\n 18 notrofil 1391 non-null float64\\n 19 rbc 1391 non-null float64\\n 20 rdw 1391 non-null float64\\n 21 total_bilirubin 1391 non-null float64\\n 22 trombosit 1391 non-null float64\\n 23 pcr 1391 non-null object \\n 24 gender 1391 non-null int64 \\n 25 pcr_result 1391 non-null int64 \\ndtypes: float64(18), int64(6), object(2)\\nmemory usage: 282.7+ KB\\n\"\n ],\n [\n \"# Dependent & Independent variables (cbc)\\n\\nX = veriler[['hb','hct','lenfosit','lokosit','mch','mchc','mcv','monocytes','notrofil',\\n 'basophils','eosinophils', 'rbc','rdw','trombosit']].values\\nY = veriler['pcr_result'].values\",\n \"_____no_output_____\"\n ],\n [\n \"# Train - Test Spilt (80% - 20%)\\n\\nfrom sklearn.model_selection import train_test_split\\nx_train, x_test,y_train,y_test = train_test_split(X,Y,stratify=Y,test_size=0.20, random_state=0)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"print('n of test set', len(y_test))\\nprint('n of train set', len(y_train))\",\n \"n of test set 279\\nn of train set 1112\\n\"\n ],\n [\n \"# Standardization\\n\\nfrom sklearn.preprocessing import StandardScaler\\n\\nsc = StandardScaler()\\nX_train = sc.fit_transform(x_train)\\nX_test = sc.fit_transform(x_test)\",\n \"_____no_output_____\"\n ],\n [\n \"#confusion matrix function\\n\\nfrom sklearn.metrics import classification_report, confusion_matrix\\nimport itertools\\ndef plot_confusion_matrix(cm, classes,\\n normalize=False,\\n title='Confusion matrix',\\n cmap=plt.cm.Blues):\\n \\\"\\\"\\\"\\n This function prints and plots the confusion matrix.\\n Normalization can be applied by setting `normalize=True`.\\n \\\"\\\"\\\"\\n if normalize:\\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\\n print(\\\"Normalized confusion matrix\\\")\\n else:\\n print('Confusion matrix, without normalization')\\n\\n print(cm)\\n\\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\\n plt.title(title)\\n plt.colorbar()\\n tick_marks = np.arange(len(classes))\\n plt.xticks(tick_marks, classes, rotation=45)\\n plt.yticks(tick_marks, classes)\\n\\n fmt = '.2f' if normalize else 'd'\\n thresh = cm.max() / 2.\\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\\n plt.text(j, i, format(cm[i, j], fmt),\\n horizontalalignment=\\\"center\\\",\\n color=\\\"white\\\" if cm[i, j] > thresh else \\\"black\\\")\\n\\n plt.tight_layout()\\n plt.ylabel('True label')\\n plt.xlabel('Predicted label')\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### Logistic Regression\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# importing library\\n\\nfrom sklearn.linear_model import LogisticRegression\",\n \"_____no_output_____\"\n ],\n [\n \"logr= LogisticRegression(random_state=0)\",\n \"_____no_output_____\"\n ],\n [\n \"logr.fit(X_train,y_train)\",\n \"_____no_output_____\"\n ],\n [\n \"y_hat= logr.predict(X_test)\\nyhat_logr = logr.predict_proba(X_test)\\ny_hat22 = y_hat\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(y_test, y_hat, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[116 26]\\n [ 30 107]]\\n\"\n ],\n [\n \"print (classification_report(y_test, y_hat))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.80 0.78 0.79 137\\n 1 0.79 0.82 0.81 142\\n\\n accuracy 0.80 279\\n macro avg 0.80 0.80 0.80 279\\nweighted avg 0.80 0.80 0.80 279\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ],\n [\n \"# 10 fold cross validation\\n\\nfrom sklearn.model_selection import cross_val_score\\n''' \\n1. estimator : classifier (bizim durum)\\n2. X\\n3. Y\\n4. cv : kaç katlamalı\\n\\n'''\\nbasari = cross_val_score(estimator = logr, X=X_train, y=y_train , cv = 10)\\nprint(basari.mean())\\nprint(basari.std())\",\n \"0.7967744530244529\\n0.020836508667640717\\n\"\n ],\n [\n \"# sao paulo external validation - logistic regression\",\n \"_____no_output_____\"\n ],\n [\n \"y_hats= logr.predict(Xs)\\n\\nyhats_logr = logr.predict_proba(Xs)\\ny_hats22 = y_hats\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(Ys, y_hats22, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[67 8]\\n [31 44]]\\n\"\n ],\n [\n \"print (classification_report(Ys, y_hats))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.85 0.59 0.69 75\\n 1 0.68 0.89 0.77 75\\n\\n accuracy 0.74 150\\n macro avg 0.76 0.74 0.73 150\\nweighted avg 0.76 0.74 0.73 150\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ]\n ],\n [\n [\n \"### Support Vector Machines\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.svm import SVC\\nsvc= SVC(kernel=\\\"rbf\\\",probability=True)\",\n \"_____no_output_____\"\n ],\n [\n \"svc.fit(X_train, y_train)\\nyhat= svc.predict(X_test)\\nyhat_svm = svc.predict_proba(X_test)\\nyhat4 = yhat # svm prediction => yhat4\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(y_test, yhat4, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[116 26]\\n [ 26 111]]\\n\"\n ],\n [\n \"print (classification_report(y_test, yhat4))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.81 0.81 0.81 137\\n 1 0.82 0.82 0.82 142\\n\\n accuracy 0.81 279\\n macro avg 0.81 0.81 0.81 279\\nweighted avg 0.81 0.81 0.81 279\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ],\n [\n \"# 10 fold cross validation\\n\\nfrom sklearn.model_selection import cross_val_score\\n''' \\n1. estimator : classifier (bizim durum)\\n2. X\\n3. Y\\n4. cv : kaç katlamalı\\n\\n'''\\nbasari = cross_val_score(estimator = svc, X=X_train, y=y_train , cv = 10)\\nprint(basari.mean())\\nprint(basari.std())\",\n \"0.8084298584298585\\n0.029357583533502745\\n\"\n ],\n [\n \"# SAO PAULO EXTERNAL VALIDATION\\n\\ny_hats4= svc.predict(Xs)\\n\\nyhats2_svc = svc.predict_proba(Xs)\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(Ys, y_hats4, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[70 5]\\n [25 50]]\\n\"\n ],\n [\n \"print (classification_report(Ys, y_hats4))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.91 0.67 0.77 75\\n 1 0.74 0.93 0.82 75\\n\\n accuracy 0.80 150\\n macro avg 0.82 0.80 0.80 150\\nweighted avg 0.82 0.80 0.80 150\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ]\n ],\n [\n [\n \"### RANDOM FOREST CLASSIFIER\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.ensemble import RandomForestClassifier\\nrfc= RandomForestClassifier(n_estimators=200,criterion=\\\"entropy\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"rfc.fit(X_train,y_train)\\nyhat7= rfc.predict(X_test)\\nyhat_rf = rfc.predict_proba(X_test)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(y_test, yhat7, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[114 28]\\n [ 20 117]]\\n\"\n ],\n [\n \"print (classification_report(y_test, yhat7))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.81 0.85 0.83 137\\n 1 0.85 0.80 0.83 142\\n\\n accuracy 0.83 279\\n macro avg 0.83 0.83 0.83 279\\nweighted avg 0.83 0.83 0.83 279\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ],\n [\n \"# 10 fold cross validation\\n\\nfrom sklearn.model_selection import cross_val_score\\n''' \\n1. estimator : classifier (bizim durum)\\n2. X\\n3. Y\\n4. cv : kaç katlamalı\\n\\n'''\\nbasari = cross_val_score(estimator = rfc, X=X_train, y=y_train , cv = 10)\\nprint(basari.mean())\\nprint(basari.std())\",\n \"0.827324646074646\\n0.03390914967191843\\n\"\n ],\n [\n \"# SAO PAULO EXTERNAL VALIDATION\\n\\nyhats7= rfc.predict(Xs)\\n\\nyhats7_rfc = rfc.predict_proba(Xs)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(Ys, yhats7, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[68 7]\\n [24 51]]\\n\"\n ],\n [\n \"print (classification_report(Ys, yhats7))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.88 0.68 0.77 75\\n 1 0.74 0.91 0.81 75\\n\\n accuracy 0.79 150\\n macro avg 0.81 0.79 0.79 150\\nweighted avg 0.81 0.79 0.79 150\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ]\n ],\n [\n [\n \"### XGBOOST\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.ensemble import GradientBoostingClassifier\\nclassifier = GradientBoostingClassifier()\",\n \"_____no_output_____\"\n ],\n [\n \"classifier.fit(X_train, y_train)\\nyhat8 = classifier.predict(X_test)\\nyhat_xgboost = classifier.predict_proba(X_test)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(y_test, yhat8, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[109 33]\\n [ 22 115]]\\n\"\n ],\n [\n \"print (classification_report(y_test, yhat8))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.78 0.84 0.81 137\\n 1 0.83 0.77 0.80 142\\n\\n accuracy 0.80 279\\n macro avg 0.80 0.80 0.80 279\\nweighted avg 0.81 0.80 0.80 279\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ],\n [\n \"# 10 fold cross validation\\n\\nfrom sklearn.model_selection import cross_val_score\\n''' \\n1. estimator : classifier (bizim durum)\\n2. X\\n3. Y\\n4. cv : kaç katlamalı\\n\\n'''\\nbasari = cross_val_score(estimator = rfc, X=X_train, y=y_train , cv = 10)\\nprint(basari.mean())\\nprint(basari.std())\",\n \"0.8363014800514801\\n0.0310012414545756\\n\"\n ],\n [\n \"# SAO PAULO EXTERNAL VALIDATION\\n\\ny_hats8= classifier.predict(Xs)\\ny_hats_xgboost = classifier.predict_proba(Xs)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# Compute confusion matrix\\ncnf_matrix = confusion_matrix(Ys, y_hats8, labels=[1,0])\\nnp.set_printoptions(precision=2)\\n\\n\\n# Plot non-normalized confusion matrix\\nplt.figure()\\nplot_confusion_matrix(cnf_matrix, classes=['PCR positive','PCR negative'],normalize= False, title='Confusion matrix')\",\n \"Confusion matrix, without normalization\\n[[64 11]\\n [28 47]]\\n\"\n ],\n [\n \"print (classification_report(Ys, y_hats8))\\n\\nprint('precision : positive predictive value')\\nprint('recall : sensitivity')\",\n \" precision recall f1-score support\\n\\n 0 0.81 0.63 0.71 75\\n 1 0.70 0.85 0.77 75\\n\\n accuracy 0.74 150\\n macro avg 0.75 0.74 0.74 150\\nweighted avg 0.75 0.74 0.74 150\\n\\nprecision : positive predictive value\\nrecall : sensitivity\\n\"\n ]\n ],\n [\n [\n \"## ROC & AUC\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#baskent dataset\\n\\nfrom sklearn.metrics import roc_curve, auc\\n\\n\\n\\nlogr_fpr, logr_tpr, threshold = roc_curve(y_test, yhat_logr[:,1]) # logr roc data\\nauc_logr = auc(logr_fpr, logr_tpr)\\n\\n\\n\\nsvm_fpr, svm_tpr, threshold = roc_curve(y_test, yhat_svm[:,1]) # svm roc data\\nauc_svm = auc(svm_fpr, svm_tpr)\\n\\n\\n\\nrf_fpr, rf_tpr, threshold = roc_curve(y_test, yhat_rf[:,1]) # rf roc data\\nauc_rf = auc(rf_fpr, rf_tpr)\\n\\nxgboost_fpr, xgboost_tpr, threshold = roc_curve(y_test, yhat_xgboost[:,1]) # xgboost roc data\\nauc_xgboost = auc(xgboost_fpr, xgboost_tpr)\\n\\n\\n\\n\\nplt.figure(figsize=(4, 4), dpi=300)\\n\\n\\nplt.plot(rf_fpr, rf_tpr, linestyle='-', label='Random Forest (AUC = %0.3f)' % auc_rf)\\nplt.plot(logr_fpr, logr_tpr, linestyle='-', label='Logistic (AUC = %0.3f)' % auc_logr)\\nplt.plot(svm_fpr, svm_tpr, linestyle='-', label='SVM (AUC = %0.3f)' % auc_svm)\\nplt.plot(xgboost_fpr, xgboost_tpr, linestyle='-', label='XGBoost (AUC = %0.3f)' % auc_xgboost)\\n\\n\\n\\n\\n\\n\\n\\n\\nplt.xlabel('False Positive Rate')\\nplt.ylabel('True Positive Rate')\\n\\nplt.legend(fontsize=8)\\n\\nplt.show()\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# sao paulo dataset\\n\\nfrom sklearn.metrics import roc_curve, auc\\n\\n\\n\\nlogr_fpr, logr_tpr, threshold = roc_curve(Ys, yhats_logr[:,1]) # logr roc data\\nauc_logr = auc(logr_fpr, logr_tpr)\\n\\n\\n\\nsvm_fpr, svm_tpr, threshold = roc_curve(Ys, yhats2_svc[:,1]) # svm roc data\\nauc_svm = auc(svm_fpr, svm_tpr)\\n\\n\\n\\nrf_fpr, rf_tpr, threshold = roc_curve(Ys, yhats7_rfc[:,1]) # rf roc data\\nauc_rf = auc(rf_fpr, rf_tpr)\\n\\nxgboost_fpr, xgboost_tpr, threshold = roc_curve(Ys, y_hats_xgboost[:,1]) # xgboost roc data\\nauc_xgboost = auc(xgboost_fpr, xgboost_tpr)\\n\\n\\n\\n\\nplt.figure(figsize=(4, 4), dpi=300)\\n\\nplt.plot(xgboost_fpr, xgboost_tpr, linestyle='-', label='XGBoost (AUC = %0.3f)' % auc_xgboost)\\nplt.plot(rf_fpr, rf_tpr, linestyle='-', label='Random Forest (AUC = %0.3f)' % auc_rf)\\nplt.plot(svm_fpr, svm_tpr, linestyle='-', label='SVM (AUC = %0.3f)' % auc_svm)\\nplt.plot(logr_fpr, logr_tpr, linestyle='-', label='Logistic (AUC = %0.3f)' % auc_logr)\\n\\n\\n\\n\\n\\n\\n\\n\\nplt.xlabel('False Positive Rate')\\nplt.ylabel('True Positive Rate')\\n\\nplt.legend(fontsize=8)\\n\\nplt.show()\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"\\nyhat22= y_hat22*1\\n#yhat22 = [item for sublist in yhat22 for item in sublist]\\n\\nyhat33= yhat4*1\\nyhat44= yhat7*1\\nyhat55= yhat8*1\\n\\nroc_data_array = [yhat55,yhat22,yhat33,yhat44,y_test]\\n\\nroc_data = pd.DataFrame(data=roc_data_array)\",\n \"_____no_output_____\"\n ],\n [\n \"roc_data.transpose().to_excel(r'roc_covid_cbc_last.xlsx')\",\n \"_____no_output_____\"\n ],\n [\n \"# validation data cbc\\n\\n\\nyhat222= y_hats22*1\\n#yhat22 = [item for sublist in yhat22 for item in sublist]\\n\\nyhat333= y_hats4*1\\nyhat444= yhats7*1\\nyhat555= y_hats8*1\\n\\nroc_data_array = [yhat555,yhat222,yhat333,yhat444,Ys]\\n\\nroc_data = pd.DataFrame(data=roc_data_array)\\nroc_data.transpose().to_excel(r'roc_covid_cbc_last_val.xlsx')\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code","code","code","code","code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code","code","code","code"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458811,"cells":{"hexsha":{"kind":"string","value":"e7e2f3b1725841bf6610660dd2f4bbf18727eca6"},"size":{"kind":"number","value":10424,"string":"10,424"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"examples/notebooks/50_cartoee_quickstart.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Yisheng-Li/geemap"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0594917a4acedfebb85879cfe2bcb6a406a55f39"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-02-08T13:34:17.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-08T13:34:17.000Z"},"max_issues_repo_path":{"kind":"string","value":"examples/notebooks/50_cartoee_quickstart.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Yisheng-Li/geemap"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0594917a4acedfebb85879cfe2bcb6a406a55f39"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"examples/notebooks/50_cartoee_quickstart.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Yisheng-Li/geemap"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0594917a4acedfebb85879cfe2bcb6a406a55f39"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":30.4795321637,"string":"30.479532"},"max_line_length":{"kind":"number","value":587,"string":"587"},"alphanum_fraction":{"kind":"number","value":0.5923829624,"string":"0.592383"},"cells":{"kind":"list like","value":[[["\"Open\n\nUncomment the following line to install [geemap](https://geemap.org) and [cartopy](https://scitools.org.uk/cartopy/docs/latest/installing.html#installing) if needed. Keep in mind that cartopy can be challenging to install. If you are unable to install cartopy on your computer, you can try Google Colab with this the [notebook example](https://colab.research.google.com/github/giswqs/geemap/blob/master/examples/notebooks/cartoee_colab.ipynb). \n\nSee below the commands to install cartopy and geemap using conda/mamba:\n\n```\nconda create -n carto python=3.8\nconda activate carto\nconda install mamba -c conda-forge\nmamba install cartopy scipy -c conda-forge\nmamba install geemap -c conda-forge\njupyter notebook\n```","_____no_output_____"]],[["# !pip install cartopy scipy\n# !pip install geemap","_____no_output_____"]],[["# How to create publication quality maps using `cartoee`\n\n`cartoee` is a lightweight module to aid in creatig publication quality maps from Earth Engine processing results without having to download data. The `cartoee` package does this by requesting png images from EE results (which are usually good enough for visualization) and `cartopy` is used to create the plots. Utility functions are available to create plot aethetics such as gridlines or color bars. **The notebook and the geemap cartoee module ([cartoee.py](https://geemap.org/cartoee)) were contributed by [Kel Markert](https://github.com/KMarkert). A huge thank you to him.**","_____no_output_____"]],[["%pylab inline\n\nimport ee\nimport geemap\n\n# import the cartoee functionality from geemap\nfrom geemap import cartoee","_____no_output_____"],["geemap.ee_initialize()","_____no_output_____"]],[["## Plotting an image\n\nIn this first example we will explore the most basic functionality including plotting and image, adding a colorbar, and adding visual aethetic features. Here we will use SRTM data to plot global elevation.","_____no_output_____"]],[["# get an image\nsrtm = ee.Image(\"CGIAR/SRTM90_V4\")","_____no_output_____"],["# geospatial region in format [E,S,W,N]\nregion = [180, -60, -180, 85] # define bounding box to request data\nvis = {'min':0, 'max':3000} # define visualization parameters for image","_____no_output_____"],["fig = plt.figure(figsize=(15, 10))\n\n# use cartoee to get a map\nax = cartoee.get_map(srtm, region=region, vis_params=vis)\n\n# add a colorbar to the map using the visualization params we passed to the map\ncartoee.add_colorbar(ax, vis, loc=\"bottom\", label=\"Elevation\", orientation=\"horizontal\")\n\n# add gridlines to the map at a specified interval\ncartoee.add_gridlines(ax, interval=[60,30], linestyle=\":\")\n\n# add coastlines using the cartopy api\nax.coastlines(color=\"red\")\n\nshow()","_____no_output_____"]],[["This is a decent map for minimal amount of code. But we can also easily use matplotlib colormaps to visualize our EE results to add more color. Here we add a `cmap` keyword to the `.get_map()` and `.add_colorbar()` functions.","_____no_output_____"]],[["fig = plt.figure(figsize=(15, 10))\n\ncmap = \"gist_earth\" # colormap we want to use\n# cmap = \"terrain\"\n\n# use cartoee to get a map\nax = cartoee.get_map(srtm, region=region, vis_params=vis, cmap=cmap)\n\n# add a colorbar to the map using the visualization params we passed to the map\ncartoee.add_colorbar(ax, vis, cmap=cmap, loc=\"right\", label=\"Elevation\", orientation=\"vertical\")\n\n# add gridlines to the map at a specified interval\ncartoee.add_gridlines(ax, interval=[60,30], linestyle=\"--\")\n\n# add coastlines using the cartopy api\nax.coastlines(color=\"red\")\n\nax.set_title(label = 'Global Elevation Map', fontsize=15)\n\nshow()","_____no_output_____"]],[["## Plotting an RGB image\n\n`cartoee` also allows for plotting of RGB image results directly. Here is an example of plotting a Landsat false-color scene.","_____no_output_____"]],[["# get a landsat image to visualize\nimage = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_044034_20140318')\n\n# define the visualization parameters to view\nvis ={\"bands\": ['B5', 'B4', 'B3'], \"min\": 0, \"max\":5000, \"gamma\":1.3}","_____no_output_____"],["fig = plt.figure(figsize=(15, 10))\n\n# use cartoee to get a map\nax = cartoee.get_map(image, vis_params=vis)\n\n# pad the view for some visual appeal\ncartoee.pad_view(ax)\n\n# add the gridlines and specify that the xtick labels be rotated 45 degrees\ncartoee.add_gridlines(ax,interval=0.5,xtick_rotation=45,linestyle=\":\")\n\n# add the coastline\nax.coastlines(color=\"yellow\")\n\nshow()","_____no_output_____"]],[["By default, if a region is not provided via the `region` keyword the whole extent of the image will be plotted as seen in the previous Landsat example. We can also zoom to a specific region of an image by defining the region to plot.","_____no_output_____"]],[["fig = plt.figure(figsize=(15, 10))\n\n# here is the bounding box of the map extent we want to use\n# formatted a [E,S,W,N]\nzoom_region = [-121.8025, 37.3458, -122.6265, 37.9178]\n\n# plot the map over the region of interest\nax = cartoee.get_map(image, vis_params=vis, region=zoom_region)\n\n# add the gridlines and specify that the xtick labels be rotated 45 degrees\ncartoee.add_gridlines(ax, interval=0.15, xtick_rotation=45, linestyle=\":\")\n\n# add coastline\nax.coastlines(color=\"yellow\")\n\nshow()","_____no_output_____"]],[["## Adding north arrow and scale bar","_____no_output_____"]],[["fig = plt.figure(figsize=(15, 10))\n\n# here is the bounding box of the map extent we want to use\n# formatted a [E,S,W,N]\nzoom_region = [-121.8025, 37.3458, -122.6265, 37.9178]\n\n# plot the map over the region of interest\nax = cartoee.get_map(image, vis_params=vis, region=zoom_region)\n\n# add the gridlines and specify that the xtick labels be rotated 45 degrees\ncartoee.add_gridlines(ax, interval=0.15, xtick_rotation=45, linestyle=\":\")\n\n# add coastline\nax.coastlines(color=\"yellow\")\n\n# add north arrow\ncartoee.add_north_arrow(ax, text=\"N\", xy=(0.05, 0.25), text_color=\"white\", arrow_color=\"white\", fontsize=20)\n\n# add scale bar\ncartoee.add_scale_bar_lite(ax, length=10, xy=(0.1, 0.05), fontsize=20, color=\"white\", unit=\"km\")\n\nax.set_title(label = 'Landsat False Color Composite (Band 5/4/3)', fontsize=15)\n\nshow()","_____no_output_____"]]],"string":"[\n [\n [\n \"\\\"Open\\n\\nUncomment the following line to install [geemap](https://geemap.org) and [cartopy](https://scitools.org.uk/cartopy/docs/latest/installing.html#installing) if needed. Keep in mind that cartopy can be challenging to install. If you are unable to install cartopy on your computer, you can try Google Colab with this the [notebook example](https://colab.research.google.com/github/giswqs/geemap/blob/master/examples/notebooks/cartoee_colab.ipynb). \\n\\nSee below the commands to install cartopy and geemap using conda/mamba:\\n\\n```\\nconda create -n carto python=3.8\\nconda activate carto\\nconda install mamba -c conda-forge\\nmamba install cartopy scipy -c conda-forge\\nmamba install geemap -c conda-forge\\njupyter notebook\\n```\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# !pip install cartopy scipy\\n# !pip install geemap\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# How to create publication quality maps using `cartoee`\\n\\n`cartoee` is a lightweight module to aid in creatig publication quality maps from Earth Engine processing results without having to download data. The `cartoee` package does this by requesting png images from EE results (which are usually good enough for visualization) and `cartopy` is used to create the plots. Utility functions are available to create plot aethetics such as gridlines or color bars. **The notebook and the geemap cartoee module ([cartoee.py](https://geemap.org/cartoee)) were contributed by [Kel Markert](https://github.com/KMarkert). A huge thank you to him.**\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%pylab inline\\n\\nimport ee\\nimport geemap\\n\\n# import the cartoee functionality from geemap\\nfrom geemap import cartoee\",\n \"_____no_output_____\"\n ],\n [\n \"geemap.ee_initialize()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Plotting an image\\n\\nIn this first example we will explore the most basic functionality including plotting and image, adding a colorbar, and adding visual aethetic features. Here we will use SRTM data to plot global elevation.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# get an image\\nsrtm = ee.Image(\\\"CGIAR/SRTM90_V4\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"# geospatial region in format [E,S,W,N]\\nregion = [180, -60, -180, 85] # define bounding box to request data\\nvis = {'min':0, 'max':3000} # define visualization parameters for image\",\n \"_____no_output_____\"\n ],\n [\n \"fig = plt.figure(figsize=(15, 10))\\n\\n# use cartoee to get a map\\nax = cartoee.get_map(srtm, region=region, vis_params=vis)\\n\\n# add a colorbar to the map using the visualization params we passed to the map\\ncartoee.add_colorbar(ax, vis, loc=\\\"bottom\\\", label=\\\"Elevation\\\", orientation=\\\"horizontal\\\")\\n\\n# add gridlines to the map at a specified interval\\ncartoee.add_gridlines(ax, interval=[60,30], linestyle=\\\":\\\")\\n\\n# add coastlines using the cartopy api\\nax.coastlines(color=\\\"red\\\")\\n\\nshow()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"This is a decent map for minimal amount of code. But we can also easily use matplotlib colormaps to visualize our EE results to add more color. Here we add a `cmap` keyword to the `.get_map()` and `.add_colorbar()` functions.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig = plt.figure(figsize=(15, 10))\\n\\ncmap = \\\"gist_earth\\\" # colormap we want to use\\n# cmap = \\\"terrain\\\"\\n\\n# use cartoee to get a map\\nax = cartoee.get_map(srtm, region=region, vis_params=vis, cmap=cmap)\\n\\n# add a colorbar to the map using the visualization params we passed to the map\\ncartoee.add_colorbar(ax, vis, cmap=cmap, loc=\\\"right\\\", label=\\\"Elevation\\\", orientation=\\\"vertical\\\")\\n\\n# add gridlines to the map at a specified interval\\ncartoee.add_gridlines(ax, interval=[60,30], linestyle=\\\"--\\\")\\n\\n# add coastlines using the cartopy api\\nax.coastlines(color=\\\"red\\\")\\n\\nax.set_title(label = 'Global Elevation Map', fontsize=15)\\n\\nshow()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Plotting an RGB image\\n\\n`cartoee` also allows for plotting of RGB image results directly. Here is an example of plotting a Landsat false-color scene.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# get a landsat image to visualize\\nimage = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_044034_20140318')\\n\\n# define the visualization parameters to view\\nvis ={\\\"bands\\\": ['B5', 'B4', 'B3'], \\\"min\\\": 0, \\\"max\\\":5000, \\\"gamma\\\":1.3}\",\n \"_____no_output_____\"\n ],\n [\n \"fig = plt.figure(figsize=(15, 10))\\n\\n# use cartoee to get a map\\nax = cartoee.get_map(image, vis_params=vis)\\n\\n# pad the view for some visual appeal\\ncartoee.pad_view(ax)\\n\\n# add the gridlines and specify that the xtick labels be rotated 45 degrees\\ncartoee.add_gridlines(ax,interval=0.5,xtick_rotation=45,linestyle=\\\":\\\")\\n\\n# add the coastline\\nax.coastlines(color=\\\"yellow\\\")\\n\\nshow()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"By default, if a region is not provided via the `region` keyword the whole extent of the image will be plotted as seen in the previous Landsat example. We can also zoom to a specific region of an image by defining the region to plot.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig = plt.figure(figsize=(15, 10))\\n\\n# here is the bounding box of the map extent we want to use\\n# formatted a [E,S,W,N]\\nzoom_region = [-121.8025, 37.3458, -122.6265, 37.9178]\\n\\n# plot the map over the region of interest\\nax = cartoee.get_map(image, vis_params=vis, region=zoom_region)\\n\\n# add the gridlines and specify that the xtick labels be rotated 45 degrees\\ncartoee.add_gridlines(ax, interval=0.15, xtick_rotation=45, linestyle=\\\":\\\")\\n\\n# add coastline\\nax.coastlines(color=\\\"yellow\\\")\\n\\nshow()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Adding north arrow and scale bar\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"fig = plt.figure(figsize=(15, 10))\\n\\n# here is the bounding box of the map extent we want to use\\n# formatted a [E,S,W,N]\\nzoom_region = [-121.8025, 37.3458, -122.6265, 37.9178]\\n\\n# plot the map over the region of interest\\nax = cartoee.get_map(image, vis_params=vis, region=zoom_region)\\n\\n# add the gridlines and specify that the xtick labels be rotated 45 degrees\\ncartoee.add_gridlines(ax, interval=0.15, xtick_rotation=45, linestyle=\\\":\\\")\\n\\n# add coastline\\nax.coastlines(color=\\\"yellow\\\")\\n\\n# add north arrow\\ncartoee.add_north_arrow(ax, text=\\\"N\\\", xy=(0.05, 0.25), text_color=\\\"white\\\", arrow_color=\\\"white\\\", fontsize=20)\\n\\n# add scale bar\\ncartoee.add_scale_bar_lite(ax, length=10, xy=(0.1, 0.05), fontsize=20, color=\\\"white\\\", unit=\\\"km\\\")\\n\\nax.set_title(label = 'Landsat False Color Composite (Band 5/4/3)', fontsize=15)\\n\\nshow()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1458812,"cells":{"hexsha":{"kind":"string","value":"e7e3005dc906f41de1d4ebc7e072cfd99e827509"},"size":{"kind":"number","value":30446,"string":"30,446"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"heart_with_python.ipynb"},"max_stars_repo_name":{"kind":"string","value":"tpalczew/heart_with_python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5ae4eb2e905a6a5db7f599f7806b881156b0d2f2"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"heart_with_python.ipynb"},"max_issues_repo_name":{"kind":"string","value":"tpalczew/heart_with_python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5ae4eb2e905a6a5db7f599f7806b881156b0d2f2"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"heart_with_python.ipynb"},"max_forks_repo_name":{"kind":"string","value":"tpalczew/heart_with_python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5ae4eb2e905a6a5db7f599f7806b881156b0d2f2"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":390.3333333333,"string":"390.333333"},"max_line_length":{"kind":"number","value":29000,"string":"29,000"},"alphanum_fraction":{"kind":"number","value":0.9477107009,"string":"0.947711"},"cells":{"kind":"list like","value":[[["import matplotlib.pyplot as plt\nimport numpy as np","_____no_output_____"],["plt.figure(figsize = [8, 7])\nt = np.arange(0,2*np.pi, 0.1)\nx = 16*np.sin(t)**3\ny = 13*np.cos(t)-5*np.cos(2*t)-2*np.cos(3*t)-np.cos(4*t)\nplt.plot(x,y)\nplt.title(\"Heart with Python\")","_____no_output_____"]]],"string":"[\n [\n [\n \"import matplotlib.pyplot as plt\\nimport numpy as np\",\n \"_____no_output_____\"\n ],\n [\n \"plt.figure(figsize = [8, 7])\\nt = np.arange(0,2*np.pi, 0.1)\\nx = 16*np.sin(t)**3\\ny = 13*np.cos(t)-5*np.cos(2*t)-2*np.cos(3*t)-np.cos(4*t)\\nplt.plot(x,y)\\nplt.title(\\\"Heart with Python\\\")\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code"]],"string":"[\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458813,"cells":{"hexsha":{"kind":"string","value":"e7e315fbe89f6824927a3762f19f2fdde6ad172a"},"size":{"kind":"number","value":5546,"string":"5,546"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Argentina - Mondiola Rock - 90 pts/Practica/TP1/ejercicio 8/.ipynb_checkpoints/Ejercicio 8-checkpoint.ipynb"},"max_stars_repo_name":{"kind":"string","value":"parolaraul/itChallengeML2017"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c7e5d65ff5f9207342158dc2818638062ce3c220"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-10-08T16:19:18.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-10-08T16:19:18.000Z"},"max_issues_repo_path":{"kind":"string","value":"Argentina - Mondiola Rock - 90 pts/Practica/TP1/ejercicio 8/Ejercicio 8.ipynb"},"max_issues_repo_name":{"kind":"string","value":"parolaraul/itChallengeML2017"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c7e5d65ff5f9207342158dc2818638062ce3c220"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Argentina - Mondiola Rock - 90 pts/Practica/TP1/ejercicio 8/Ejercicio 8.ipynb"},"max_forks_repo_name":{"kind":"string","value":"parolaraul/itChallengeML2017"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c7e5d65ff5f9207342158dc2818638062ce3c220"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":31.5113636364,"string":"31.511364"},"max_line_length":{"kind":"number","value":362,"string":"362"},"alphanum_fraction":{"kind":"number","value":0.5980887126,"string":"0.598089"},"cells":{"kind":"list like","value":[[["# EJERCICIO 8\nEl trigo es uno de los tres granos más ampliamente producidos globalmente, junto al maíz y el arroz, y el más ampliamente consumido por el hombre en la civilización occidental desde la antigüedad. El grano de trigo es utilizado para hacer harina, harina integral, sémola, cerveza y una gran variedad de productos alimenticios.\nSe requiere clasificar semillas de trigo pertenecientes a las variedades Kama, Rosa y Canadiense.\nSe cuenta con 70 muestras de cada una de las variedades, a cuyas semillas se le realizaron mediciones de diferentes propiedades geométricas: Área, perímetro, compacidad, largo, ancho, coeficiente de asimetría, largo del carpelo (todos valores reales continuos).\nUtilice perceptrones o una red neuronal artificial (según resulte más conveniente) para lograr producir un clasificador de los tres tipos de semillas de trigo a partir de las muestras obtenidas. Informe el criterio empleado para decidir el tipo de clasificador entrenado y la arquitectura y los parámetros usados en su entrenamiento (según corresponda).\nUtilice para el entrenamiento sólo el 90% de las muestras disponibles de cada variedad. Informe la matriz de confusión que produce el mejor clasificador obtenido al evaluarlo con las muestras de entrenamiento e indique la matriz que ese clasificador produce al usarlo sobre el resto de las muestras reservadas para prueba.","_____no_output_____"]],[["import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport mpld3\n%matplotlib inline\nmpld3.enable_notebook()\nfrom cperceptron import Perceptron\nfrom cbackpropagation import ANN #, Identidad, Sigmoide\nimport patrones as magia\ndef progreso(ann, X, T, y=None, n=-1, E=None):\n if n % 20 == 0:\n print(\"Pasos: {0} - Error: {1:.32f}\".format(n, E)) \ndef progresoPerceptron(perceptron, X, T, n):\n y = perceptron.evaluar(X)\n incorrectas = (T != y).sum()\n print(\"Pasos: {0}\\tIncorrectas: {1}\\n\".format(n, incorrectas))","_____no_output_____"],["semillas = np.load('semillas.npy')\n\ndatos = semillas[:, :-1]\ntipos = semillas[:, -1]\n\n# tipos == 1 --> Kama\n# tipos == 2 --> Rosa\n# tipos == 3 --> Canadiense","_____no_output_____"],["#Armo Patrones\nclases, patronesEnt, patronesTest = magia.generar_patrones(\n magia.escalar(datos),tipos,90)\nX, T = magia.armar_patrones_y_salida_esperada(clases,patronesEnt)\nXtest, Ttest = magia.armar_patrones_y_salida_esperada(clases,patronesEnt)","_____no_output_____"],["# Crea la red neuronal\nocultas = 10\nentradas = X.shape[1]\nsalidas = T.shape[1]\nann = ANN(entradas, ocultas, salidas)\nann.reiniciar()","_____no_output_____"],["#Entreno\nE, n = ann.entrenar_rprop(X, T, min_error=0, max_pasos=5000, callback=progreso, frecuencia_callback=1000)\nprint(\"\\nRed entrenada en {0} pasos con un error de {1:.32f}\".format(n, E))","Pasos: 1000 - Error: 0.00205722829422903775650754987225\nPasos: 2000 - Error: 0.00067306802543931458244347298958\nPasos: 3000 - Error: 0.00026668858335486984043397051813\nPasos: 4000 - Error: 0.00013392867423468472125140660278\nPasos: 5000 - Error: 0.00007582658158353847816738474430\n\nRed entrenada en 5000 pasos con un error de 0.00007582658158353847816738474430\n"],["#Evaluo\nY = (ann.evaluar(Xtest) >= 0.97)\nmagia.matriz_de_confusion(Ttest,Y)","_____no_output_____"]]],"string":"[\n [\n [\n \"# EJERCICIO 8\\nEl trigo es uno de los tres granos más ampliamente producidos globalmente, junto al maíz y el arroz, y el más ampliamente consumido por el hombre en la civilización occidental desde la antigüedad. El grano de trigo es utilizado para hacer harina, harina integral, sémola, cerveza y una gran variedad de productos alimenticios.\\nSe requiere clasificar semillas de trigo pertenecientes a las variedades Kama, Rosa y Canadiense.\\nSe cuenta con 70 muestras de cada una de las variedades, a cuyas semillas se le realizaron mediciones de diferentes propiedades geométricas: Área, perímetro, compacidad, largo, ancho, coeficiente de asimetría, largo del carpelo (todos valores reales continuos).\\nUtilice perceptrones o una red neuronal artificial (según resulte más conveniente) para lograr producir un clasificador de los tres tipos de semillas de trigo a partir de las muestras obtenidas. Informe el criterio empleado para decidir el tipo de clasificador entrenado y la arquitectura y los parámetros usados en su entrenamiento (según corresponda).\\nUtilice para el entrenamiento sólo el 90% de las muestras disponibles de cada variedad. Informe la matriz de confusión que produce el mejor clasificador obtenido al evaluarlo con las muestras de entrenamiento e indique la matriz que ese clasificador produce al usarlo sobre el resto de las muestras reservadas para prueba.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\nimport matplotlib.pyplot as plt\\nimport matplotlib as mpl\\nimport mpld3\\n%matplotlib inline\\nmpld3.enable_notebook()\\nfrom cperceptron import Perceptron\\nfrom cbackpropagation import ANN #, Identidad, Sigmoide\\nimport patrones as magia\\ndef progreso(ann, X, T, y=None, n=-1, E=None):\\n if n % 20 == 0:\\n print(\\\"Pasos: {0} - Error: {1:.32f}\\\".format(n, E)) \\ndef progresoPerceptron(perceptron, X, T, n):\\n y = perceptron.evaluar(X)\\n incorrectas = (T != y).sum()\\n print(\\\"Pasos: {0}\\\\tIncorrectas: {1}\\\\n\\\".format(n, incorrectas))\",\n \"_____no_output_____\"\n ],\n [\n \"semillas = np.load('semillas.npy')\\n\\ndatos = semillas[:, :-1]\\ntipos = semillas[:, -1]\\n\\n# tipos == 1 --> Kama\\n# tipos == 2 --> Rosa\\n# tipos == 3 --> Canadiense\",\n \"_____no_output_____\"\n ],\n [\n \"#Armo Patrones\\nclases, patronesEnt, patronesTest = magia.generar_patrones(\\n magia.escalar(datos),tipos,90)\\nX, T = magia.armar_patrones_y_salida_esperada(clases,patronesEnt)\\nXtest, Ttest = magia.armar_patrones_y_salida_esperada(clases,patronesEnt)\",\n \"_____no_output_____\"\n ],\n [\n \"# Crea la red neuronal\\nocultas = 10\\nentradas = X.shape[1]\\nsalidas = T.shape[1]\\nann = ANN(entradas, ocultas, salidas)\\nann.reiniciar()\",\n \"_____no_output_____\"\n ],\n [\n \"#Entreno\\nE, n = ann.entrenar_rprop(X, T, min_error=0, max_pasos=5000, callback=progreso, frecuencia_callback=1000)\\nprint(\\\"\\\\nRed entrenada en {0} pasos con un error de {1:.32f}\\\".format(n, E))\",\n \"Pasos: 1000 - Error: 0.00205722829422903775650754987225\\nPasos: 2000 - Error: 0.00067306802543931458244347298958\\nPasos: 3000 - Error: 0.00026668858335486984043397051813\\nPasos: 4000 - Error: 0.00013392867423468472125140660278\\nPasos: 5000 - Error: 0.00007582658158353847816738474430\\n\\nRed entrenada en 5000 pasos con un error de 0.00007582658158353847816738474430\\n\"\n ],\n [\n \"#Evaluo\\nY = (ann.evaluar(Xtest) >= 0.97)\\nmagia.matriz_de_confusion(Ttest,Y)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458814,"cells":{"hexsha":{"kind":"string","value":"e7e32245a76ac6535783706e0949b1a88cc3669e"},"size":{"kind":"number","value":128889,"string":"128,889"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"code/first_try/.ipynb_checkpoints/transformer_encoder-checkpoint.ipynb"},"max_stars_repo_name":{"kind":"string","value":"steveyu323/motor_embedding"},"max_stars_repo_head_hexsha":{"kind":"string","value":"65b05e024ca5a0aa339330eff6b63927af5ce4aa"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"code/first_try/.ipynb_checkpoints/transformer_encoder-checkpoint.ipynb"},"max_issues_repo_name":{"kind":"string","value":"steveyu323/motor_embedding"},"max_issues_repo_head_hexsha":{"kind":"string","value":"65b05e024ca5a0aa339330eff6b63927af5ce4aa"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"code/first_try/.ipynb_checkpoints/transformer_encoder-checkpoint.ipynb"},"max_forks_repo_name":{"kind":"string","value":"steveyu323/motor_embedding"},"max_forks_repo_head_hexsha":{"kind":"string","value":"65b05e024ca5a0aa339330eff6b63927af5ce4aa"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":30.4486180014,"string":"30.448618"},"max_line_length":{"kind":"number","value":1250,"string":"1,250"},"alphanum_fraction":{"kind":"number","value":0.526119374,"string":"0.526119"},"cells":{"kind":"list like","value":[[["# Model Description\n- Apply a transformer based model to pfam/unirep_50 data and extract the embedding features\n> In this tutorial, we train nn.TransformerEncoder model on a language modeling task. The language modeling task is to assign a probability for the likelihood of a given word (or a sequence of words) to follow a sequence of words. A sequence of tokens are passed to the embedding layer first, followed by a positional encoding layer to account for the order of the word (see the next paragraph for more details). The nn.TransformerEncoder consists of multiple layers of nn.TransformerEncoderLayer. Along with the input sequence, a square attention mask is required because the self-attention layers in nn.TransformerEncoder are only allowed to attend the earlier positions in the sequence. For the language modeling task, any tokens on the future positions should be masked. To have the actual words, the output of nn.TransformerEncoder model is sent to the final Linear layer, which is followed by a log-Softmax function.\n\n## Math and model formulation and code reference:\n- Attention is all you need https://arxiv.org/abs/1706.03762\n- ResNet https://towardsdatascience.com/understanding-and-visualizing-resnets-442284831be8\n- MIT Visualization http://jalammar.github.io/illustrated-transformer/\n- An Annotated transformer http://nlp.seas.harvard.edu/2018/04/03/attention.html#a-real-world-example","_____no_output_____"]],[["import math\nimport torch.nn as nn\nimport argparse\nimport random\nimport warnings\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import optim\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom torch.autograd import Variable\nimport itertools\nimport pandas as pd\n# seed = 7\n# torch.manual_seed(seed)\n# np.random.seed(seed)\n\npfamA_motors = pd.read_csv(\"../data/pfamA_motors.csv\")\ndf_dev = pd.read_csv(\"../data/df_dev.csv\")\npfamA_motors = pfamA_motors.iloc[:,1:]\nclan_train_dat = pfamA_motors.groupby(\"clan\").head(4000)\nclan_train_dat = clan_train_dat.sample(frac=1).reset_index(drop=True)\nclan_test_dat = pfamA_motors.loc[~pfamA_motors[\"id\"].isin(clan_train_dat[\"id\"]),:].groupby(\"clan\").head(400)\n\nclan_train_dat.shape\n\ndef df_to_tup(dat):\n data = []\n for i in range(dat.shape[0]):\n row = dat.iloc[i,:]\n tup = (row[\"seq\"],row[\"clan\"])\n data.append(tup)\n return data\n\nclan_training_data = df_to_tup(clan_train_dat)\nclan_test_data = df_to_tup(clan_test_dat)\nfor seq,clan in clan_training_data:\n print(seq)\n print(clan)\n break\n\naminoacid_list = [\n 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'\n]\nclan_list = [\"actin_like\",\"tubulin_c\",\"tubulin_binding\",\"p_loop_gtpase\"]\n \naa_to_ix = dict(zip(aminoacid_list, np.arange(1, 21)))\nclan_to_ix = dict(zip(clan_list, np.arange(0, 4)))\n\ndef word_to_index(seq,to_ix):\n \"Returns a list of indices (integers) from a list of words.\"\n return [to_ix.get(word, 0) for word in seq]\n\nix_to_aa = dict(zip(np.arange(1, 21), aminoacid_list))\nix_to_clan = dict(zip(np.arange(0, 4), clan_list))\n\ndef index_to_word(ixs,ix_to): \n \"Returns a list of words, given a list of their corresponding indices.\"\n return [ix_to.get(ix, 'X') for ix in ixs]\n\ndef prepare_sequence(seq):\n idxs = word_to_index(seq[0:-1],aa_to_ix)\n return torch.tensor(idxs, dtype=torch.long)\n\ndef prepare_labels(seq):\n idxs = word_to_index(seq[1:],aa_to_ix)\n return torch.tensor(idxs, dtype=torch.long)\n\nprepare_labels('YCHXXXXX')\n\n","DGRIPQRDVAAKLVIVMVGLPARGKSYITKKLQRYLSWQQHESRIFNVGNRRRNAAGIKTSARANSGQALDPPVEAATI\np_loop_gtpase\n"],["# set device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice","_____no_output_____"],["class PositionalEncoding(nn.Module):\n \"\"\"\n PositionalEncoding module injects some information about the relative or absolute position of\n the tokens in the sequence. The positional encodings have the same dimension as the embeddings \n so that the two can be summed. Here, we use sine and cosine functions of different frequencies.\n \"\"\"\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n \n# pe[:, 0::2] = torch.sin(position * div_term)\n# pe[:, 1::2] = torch.cos(position * div_term)\n# pe = pe.unsqueeze(0)\n \n self.register_buffer('pe', pe)\n\n def forward(self, x):\n# x = x + self.pe[:x.size(0), :]\n# print(\"x.size() : \", x.size())\n# print(\"self.pe.size() :\", self.pe[:x.size(0),:,:].size())\n x = torch.add(x ,Variable(self.pe[:x.size(0),:,:], requires_grad=False))\n return self.dropout(x)","_____no_output_____"],["\n \nclass TransformerModel(nn.Module):\n\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n super(TransformerModel, self).__init__()\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n self.model_type = 'Transformer'\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src):\n if self.src_mask is None or self.src_mask.size(0) != src.size(0):\n device = src.device\n mask = self._generate_square_subsequent_mask(src.size(0)).to(device)\n self.src_mask = mask\n# print(\"src.device: \", src.device)\n src = self.encoder(src) * math.sqrt(self.ninp)\n# print(\"self.encoder(src) size: \", src.size())\n src = self.pos_encoder(src)\n# print(\"elf.pos_encoder(src) size: \", src.size())\n output = self.transformer_encoder(src, self.src_mask)\n# print(\"output size: \", output.size())\n output = self.decoder(output)\n return output","_____no_output_____"],["ntokens = len(aminoacid_list) + 1 # the size of vocabulary\nemsize = 12 # embedding dimension\nnhid = 100 # the dimension of the feedforward network model in nn.TransformerEncoder\nnlayers = 6 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\nnhead = 12 # the number of heads in the multiheadattention models\ndropout = 0.1 # the dropout value\nmodel = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)","_____no_output_____"],["import time","_____no_output_____"],["criterion = nn.CrossEntropyLoss()\nlr = 3.0 # learning rate\noptimizer = torch.optim.SGD(model.parameters(), lr=lr)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)\n\nmodel.to(device)\nmodel.train() # Turn on the train mode","_____no_output_____"],["start_time = time.time()\nprint_every = 1\nloss_vector = []\n\nfor epoch in np.arange(0, df_dev.shape[0]): \n seq = df_dev.iloc[epoch, 6]\n sentence_in = prepare_sequence(seq)\n targets = prepare_labels(seq)\n# sentence_in = sentence_in.to(device = device)\n sentence_in = sentence_in.unsqueeze(1).to(device = device)\n targets = targets.to(device = device)\n \n optimizer.zero_grad()\n output = model(sentence_in)\n \n print(\"targets size: \", targets.size())\n loss = criterion(output.view(-1, ntokens), targets)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n optimizer.step()\n if epoch % print_every == 0:\n print(f\"At Epoch: %.1f\"% epoch)\n print(f\"Loss %.4f\"% loss)\n loss_vector.append(loss)\n break\n \n","targets size: torch.Size([115])\nAt Epoch: 0.0\nLoss 4.3648\n"],["start_time = time.time()\nprint_every = 1000\n# loss_vector = []\n\nfor epoch in np.arange(0, df_dev.shape[0]): \n seq = df_dev.iloc[epoch, 6]\n \n sentence_in = prepare_sequence(seq)\n targets = prepare_labels(seq)\n# sentence_in = sentence_in.to(device = device)\n sentence_in = sentence_in.unsqueeze(1).to(device = device)\n targets = targets.to(device = device)\n \n optimizer.zero_grad()\n output = model(sentence_in)\n \n# print(\"targets size: \", targets.size())\n loss = criterion(output.view(-1, ntokens), targets)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n optimizer.step()\n if epoch % print_every == 0:\n print(f\"At Epoch: %.1f\"% epoch)\n print(f\"Loss %.4f\"% loss)\n elapsed = time.time() - start_time\n print(f\"time elapsed %.4f\"% elapsed)\n torch.save(model.state_dict(), \"../data/transformer_encoder_201012.pt\")\n# loss_vector.append(loss)\n\n ","At Epoch: 0.0\nLoss 8.4750\ntime elapsed 0.0296\nAt Epoch: 1000.0\nLoss 3.0725\ntime elapsed 23.4438\nAt Epoch: 2000.0\nLoss 3.2927\ntime elapsed 45.1294\nAt Epoch: 3000.0\nLoss 2.9190\ntime elapsed 66.5057\nAt Epoch: 4000.0\nLoss 3.0607\ntime elapsed 87.3455\nAt Epoch: 5000.0\nLoss 2.9439\ntime elapsed 108.5937\nAt Epoch: 6000.0\nLoss 3.0892\ntime elapsed 129.0390\nAt Epoch: 7000.0\nLoss 3.0167\ntime elapsed 149.5365\nAt Epoch: 8000.0\nLoss 3.0786\ntime elapsed 170.1348\nAt Epoch: 9000.0\nLoss 2.8547\ntime elapsed 193.8191\nAt Epoch: 10000.0\nLoss 3.0577\ntime elapsed 215.4009\nAt Epoch: 11000.0\nLoss 2.8724\ntime elapsed 237.3885\nAt Epoch: 12000.0\nLoss 2.9954\ntime elapsed 258.8655\nAt Epoch: 13000.0\nLoss 2.9179\ntime elapsed 279.4901\nAt Epoch: 14000.0\nLoss 2.9298\ntime elapsed 300.0160\nAt Epoch: 15000.0\nLoss 2.9484\ntime elapsed 321.0145\nAt Epoch: 16000.0\nLoss 2.8884\ntime elapsed 342.1072\nAt Epoch: 17000.0\nLoss 2.8484\ntime elapsed 363.1312\nAt Epoch: 18000.0\nLoss 2.8564\ntime elapsed 384.3074\nAt Epoch: 19000.0\nLoss 2.8983\ntime elapsed 404.7843\nAt Epoch: 20000.0\nLoss 2.9394\ntime elapsed 425.4477\nAt Epoch: 21000.0\nLoss 2.9441\ntime elapsed 445.9871\nAt Epoch: 22000.0\nLoss 3.0404\ntime elapsed 467.5490\nAt Epoch: 23000.0\nLoss 2.9080\ntime elapsed 488.7186\nAt Epoch: 24000.0\nLoss 2.9070\ntime elapsed 509.4235\nAt Epoch: 25000.0\nLoss 2.9876\ntime elapsed 530.4340\nAt Epoch: 26000.0\nLoss 2.8721\ntime elapsed 551.2129\nAt Epoch: 27000.0\nLoss 2.9223\ntime elapsed 572.3205\nAt Epoch: 28000.0\nLoss 3.0041\ntime elapsed 593.1280\nAt Epoch: 29000.0\nLoss 2.9332\ntime elapsed 614.2564\nAt Epoch: 30000.0\nLoss 2.9536\ntime elapsed 635.6761\nAt Epoch: 31000.0\nLoss 2.9049\ntime elapsed 656.7652\nAt Epoch: 32000.0\nLoss 2.9010\ntime elapsed 678.9555\nAt Epoch: 33000.0\nLoss 3.0228\ntime elapsed 701.3504\nAt Epoch: 34000.0\nLoss 2.8772\ntime elapsed 723.1934\nAt Epoch: 35000.0\nLoss 2.9568\ntime elapsed 743.8592\nAt Epoch: 36000.0\nLoss 3.0205\ntime elapsed 764.2397\nAt Epoch: 37000.0\nLoss 2.9290\ntime elapsed 785.0874\nAt Epoch: 38000.0\nLoss 2.9392\ntime elapsed 805.7109\nAt Epoch: 39000.0\nLoss 2.9803\ntime elapsed 826.0186\nAt Epoch: 40000.0\nLoss 3.3199\ntime elapsed 846.4928\nAt Epoch: 41000.0\nLoss 3.0244\ntime elapsed 867.0773\nAt Epoch: 42000.0\nLoss 2.9066\ntime elapsed 888.0423\nAt Epoch: 43000.0\nLoss 3.0540\ntime elapsed 908.6464\nAt Epoch: 44000.0\nLoss 2.8792\ntime elapsed 929.9293\nAt Epoch: 45000.0\nLoss 2.8411\ntime elapsed 951.4267\nAt Epoch: 46000.0\nLoss 2.8649\ntime elapsed 974.1426\nAt Epoch: 47000.0\nLoss 3.0632\ntime elapsed 995.3097\nAt Epoch: 48000.0\nLoss 2.8977\ntime elapsed 1016.0418\nAt Epoch: 49000.0\nLoss 2.8232\ntime elapsed 1036.8268\nAt Epoch: 50000.0\nLoss 2.9775\ntime elapsed 1057.6992\nAt Epoch: 51000.0\nLoss 2.8774\ntime elapsed 1078.1217\nAt Epoch: 52000.0\nLoss 3.0089\ntime elapsed 1099.0703\nAt Epoch: 53000.0\nLoss 3.0592\ntime elapsed 1119.7085\nAt Epoch: 54000.0\nLoss 2.9713\ntime elapsed 1140.3441\nAt Epoch: 55000.0\nLoss 3.1734\ntime elapsed 1160.6746\nAt Epoch: 56000.0\nLoss 3.0600\ntime elapsed 1181.5164\nAt Epoch: 57000.0\nLoss 3.0501\ntime elapsed 1203.4995\nAt Epoch: 58000.0\nLoss 2.9531\ntime elapsed 1224.1511\nAt Epoch: 59000.0\nLoss 3.1052\ntime elapsed 1244.6471\nAt Epoch: 60000.0\nLoss 3.0830\ntime elapsed 1267.7761\nAt Epoch: 61000.0\nLoss 3.2620\ntime elapsed 1288.7117\nAt Epoch: 62000.0\nLoss 3.0715\ntime elapsed 1309.3894\nAt Epoch: 63000.0\nLoss 2.9098\ntime elapsed 1331.1903\nAt Epoch: 64000.0\nLoss 3.0716\ntime elapsed 1353.5770\nAt Epoch: 65000.0\nLoss 3.0372\ntime elapsed 1374.6066\nAt Epoch: 66000.0\nLoss 2.8375\ntime elapsed 1395.4844\nAt Epoch: 67000.0\nLoss 2.8937\ntime elapsed 1416.3880\nAt Epoch: 68000.0\nLoss 3.0458\ntime elapsed 1437.1701\nAt Epoch: 69000.0\nLoss 2.9471\ntime elapsed 1457.8177\nAt Epoch: 70000.0\nLoss 2.8958\ntime elapsed 1478.3414\nAt Epoch: 71000.0\nLoss 2.8441\ntime elapsed 1499.9152\nAt Epoch: 72000.0\nLoss 3.0838\ntime elapsed 1520.5460\nAt Epoch: 73000.0\nLoss 2.9954\ntime elapsed 1543.2272\nAt Epoch: 74000.0\nLoss 2.8729\ntime elapsed 1564.0716\nAt Epoch: 75000.0\nLoss 2.9577\ntime elapsed 1584.8576\nAt Epoch: 76000.0\nLoss 2.8508\ntime elapsed 1605.4031\nAt Epoch: 77000.0\nLoss 3.0296\ntime elapsed 1626.2475\nAt Epoch: 78000.0\nLoss 3.1241\ntime elapsed 1646.9719\nAt Epoch: 79000.0\nLoss 3.0788\ntime elapsed 1667.8616\nAt Epoch: 80000.0\nLoss 2.9333\ntime elapsed 1691.5064\nAt Epoch: 81000.0\nLoss 2.9634\ntime elapsed 1714.5915\nAt Epoch: 82000.0\nLoss 3.0049\ntime elapsed 1737.5477\nAt Epoch: 83000.0\nLoss 2.9076\ntime elapsed 1758.6892\nAt Epoch: 84000.0\nLoss 3.0611\ntime elapsed 1779.7354\nAt Epoch: 85000.0\nLoss 3.1268\ntime elapsed 1800.5026\nAt Epoch: 86000.0\nLoss 3.0537\ntime elapsed 1821.2306\nAt Epoch: 87000.0\nLoss 3.0465\ntime elapsed 1842.5739\nAt Epoch: 88000.0\nLoss 2.9932\ntime elapsed 1863.4517\nAt Epoch: 89000.0\nLoss 2.9754\ntime elapsed 1883.9016\nAt Epoch: 90000.0\nLoss 2.9752\ntime elapsed 1904.9715\nAt Epoch: 91000.0\nLoss 3.0308\ntime elapsed 1925.6555\nAt Epoch: 92000.0\nLoss 3.4525\ntime elapsed 1946.2182\nAt Epoch: 93000.0\nLoss 2.9289\ntime elapsed 1966.9721\nAt Epoch: 94000.0\nLoss 3.0368\ntime elapsed 1987.7996\nAt Epoch: 95000.0\nLoss 2.7917\ntime elapsed 2009.1464\nAt Epoch: 96000.0\nLoss 2.9012\ntime elapsed 2033.3650\nAt Epoch: 97000.0\nLoss 2.9937\ntime elapsed 2054.6392\nAt Epoch: 98000.0\nLoss 3.0972\ntime elapsed 2075.4421\nAt Epoch: 99000.0\nLoss 3.1194\ntime elapsed 2096.3616\nAt Epoch: 100000.0\nLoss 2.9343\ntime elapsed 2117.2292\nAt Epoch: 101000.0\nLoss 3.0200\ntime elapsed 2138.0234\nAt Epoch: 102000.0\nLoss 3.2507\ntime elapsed 2158.6976\nAt Epoch: 103000.0\nLoss 2.9581\ntime elapsed 2180.0396\nAt Epoch: 104000.0\nLoss 3.2382\ntime elapsed 2202.0479\nAt Epoch: 105000.0\nLoss 3.0344\ntime elapsed 2223.0734\nAt Epoch: 106000.0\nLoss 2.9038\ntime elapsed 2244.1027\nAt Epoch: 107000.0\nLoss 2.9855\ntime elapsed 2265.6176\nAt Epoch: 108000.0\nLoss 3.0731\ntime elapsed 2286.5905\nAt Epoch: 109000.0\nLoss 3.2424\ntime elapsed 2307.5373\nAt Epoch: 110000.0\nLoss 2.9557\ntime elapsed 2328.5830\nAt Epoch: 111000.0\nLoss 2.8459\ntime elapsed 2349.3629\nAt Epoch: 112000.0\nLoss 3.1116\ntime elapsed 2369.8899\nAt Epoch: 113000.0\nLoss 2.8973\ntime elapsed 2391.1698\nAt Epoch: 114000.0\nLoss 3.0910\ntime elapsed 2412.4004\nAt Epoch: 115000.0\nLoss 3.0290\ntime elapsed 2433.3900\nAt Epoch: 116000.0\nLoss 2.8997\ntime elapsed 2454.9702\nAt Epoch: 117000.0\nLoss 3.0588\ntime elapsed 2476.0218\nAt Epoch: 118000.0\nLoss 3.0554\ntime elapsed 2497.6892\nAt Epoch: 119000.0\nLoss 2.9734\ntime elapsed 2519.9516\nAt Epoch: 120000.0\nLoss 3.1741\ntime elapsed 2544.0268\nAt Epoch: 121000.0\nLoss 2.9140\ntime elapsed 2565.9631\nAt Epoch: 122000.0\nLoss 2.8833\ntime elapsed 2586.9483\nAt Epoch: 123000.0\nLoss 3.0268\ntime elapsed 2607.9265\nAt Epoch: 124000.0\nLoss 2.9004\ntime elapsed 2630.8401\nAt Epoch: 125000.0\nLoss 3.0086\ntime elapsed 2651.4247\nAt Epoch: 126000.0\nLoss 3.0224\ntime elapsed 2672.0303\nAt Epoch: 127000.0\nLoss 3.2991\ntime elapsed 2693.3880\nAt Epoch: 128000.0\nLoss 3.1404\ntime elapsed 2714.1731\nAt Epoch: 129000.0\nLoss 3.0332\ntime elapsed 2736.7471\nAt Epoch: 130000.0\nLoss 3.2477\ntime elapsed 2759.7213\nAt Epoch: 131000.0\nLoss 3.1290\ntime elapsed 2780.8084\nAt Epoch: 132000.0\nLoss 2.9785\ntime elapsed 2802.3767\nAt Epoch: 133000.0\nLoss 2.9272\ntime elapsed 2822.8847\nAt Epoch: 134000.0\nLoss 2.9764\ntime elapsed 2843.6123\nAt Epoch: 135000.0\nLoss 3.2959\ntime elapsed 2864.3198\nAt Epoch: 136000.0\nLoss 2.9485\ntime elapsed 2885.0592\nAt Epoch: 137000.0\nLoss 3.1048\ntime elapsed 2906.0318\nAt Epoch: 138000.0\nLoss 2.8635\ntime elapsed 2926.9689\nAt Epoch: 139000.0\nLoss 3.2045\ntime elapsed 2947.7551\nAt Epoch: 140000.0\nLoss 3.0406\ntime elapsed 2968.8903\nAt Epoch: 141000.0\nLoss 2.9509\ntime elapsed 2989.9148\nAt Epoch: 142000.0\nLoss 2.9764\ntime elapsed 3010.8331\nAt Epoch: 143000.0\nLoss 3.0912\ntime elapsed 3031.6044\nAt Epoch: 144000.0\nLoss 3.0150\ntime elapsed 3052.5004\nAt Epoch: 145000.0\nLoss 3.0335\ntime elapsed 3075.1163\nAt Epoch: 146000.0\nLoss 2.9631\ntime elapsed 3098.1154\nAt Epoch: 147000.0\nLoss 3.0329\ntime elapsed 3121.1964\nAt Epoch: 148000.0\nLoss 3.4890\ntime elapsed 3143.0822\nAt Epoch: 149000.0\nLoss 2.9324\ntime elapsed 3164.6447\nAt Epoch: 150000.0\nLoss 2.8988\ntime elapsed 3188.3173\nAt Epoch: 151000.0\nLoss 2.9578\ntime elapsed 3208.9634\nAt Epoch: 152000.0\nLoss 3.0209\ntime elapsed 3229.6342\nAt Epoch: 153000.0\nLoss 3.4023\ntime elapsed 3251.5514\nAt Epoch: 154000.0\nLoss 2.9682\ntime elapsed 3273.0930\n"],["torch.save(model.state_dict(), \"../data/transformer_encoder_201012.pt\")","_____no_output_____"],["print(\"done\")","done\n"],["ntokens = len(aminoacid_list) + 1 # the size of vocabulary\nemsize = 128 # embedding dimension\nnhid = 100 # the dimension of the feedforward network model in nn.TransformerEncoder\nnlayers = 3 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\nnhead = 12 # the number of heads in the multiheadattention models\ndropout = 0.1 # the dropout value\nmodel = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)","_____no_output_____"]]],"string":"[\n [\n [\n \"# Model Description\\n- Apply a transformer based model to pfam/unirep_50 data and extract the embedding features\\n> In this tutorial, we train nn.TransformerEncoder model on a language modeling task. The language modeling task is to assign a probability for the likelihood of a given word (or a sequence of words) to follow a sequence of words. A sequence of tokens are passed to the embedding layer first, followed by a positional encoding layer to account for the order of the word (see the next paragraph for more details). The nn.TransformerEncoder consists of multiple layers of nn.TransformerEncoderLayer. Along with the input sequence, a square attention mask is required because the self-attention layers in nn.TransformerEncoder are only allowed to attend the earlier positions in the sequence. For the language modeling task, any tokens on the future positions should be masked. To have the actual words, the output of nn.TransformerEncoder model is sent to the final Linear layer, which is followed by a log-Softmax function.\\n\\n## Math and model formulation and code reference:\\n- Attention is all you need https://arxiv.org/abs/1706.03762\\n- ResNet https://towardsdatascience.com/understanding-and-visualizing-resnets-442284831be8\\n- MIT Visualization http://jalammar.github.io/illustrated-transformer/\\n- An Annotated transformer http://nlp.seas.harvard.edu/2018/04/03/attention.html#a-real-world-example\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import math\\nimport torch.nn as nn\\nimport argparse\\nimport random\\nimport warnings\\nimport numpy as np\\nimport torch\\nimport torch.nn.functional as F\\nfrom torch import optim\\nimport torch.backends.cudnn as cudnn\\nfrom torch.utils.data import DataLoader\\nfrom torch.utils.data import Dataset\\nfrom torch.autograd import Variable\\nimport itertools\\nimport pandas as pd\\n# seed = 7\\n# torch.manual_seed(seed)\\n# np.random.seed(seed)\\n\\npfamA_motors = pd.read_csv(\\\"../data/pfamA_motors.csv\\\")\\ndf_dev = pd.read_csv(\\\"../data/df_dev.csv\\\")\\npfamA_motors = pfamA_motors.iloc[:,1:]\\nclan_train_dat = pfamA_motors.groupby(\\\"clan\\\").head(4000)\\nclan_train_dat = clan_train_dat.sample(frac=1).reset_index(drop=True)\\nclan_test_dat = pfamA_motors.loc[~pfamA_motors[\\\"id\\\"].isin(clan_train_dat[\\\"id\\\"]),:].groupby(\\\"clan\\\").head(400)\\n\\nclan_train_dat.shape\\n\\ndef df_to_tup(dat):\\n data = []\\n for i in range(dat.shape[0]):\\n row = dat.iloc[i,:]\\n tup = (row[\\\"seq\\\"],row[\\\"clan\\\"])\\n data.append(tup)\\n return data\\n\\nclan_training_data = df_to_tup(clan_train_dat)\\nclan_test_data = df_to_tup(clan_test_dat)\\nfor seq,clan in clan_training_data:\\n print(seq)\\n print(clan)\\n break\\n\\naminoacid_list = [\\n 'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',\\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y'\\n]\\nclan_list = [\\\"actin_like\\\",\\\"tubulin_c\\\",\\\"tubulin_binding\\\",\\\"p_loop_gtpase\\\"]\\n \\naa_to_ix = dict(zip(aminoacid_list, np.arange(1, 21)))\\nclan_to_ix = dict(zip(clan_list, np.arange(0, 4)))\\n\\ndef word_to_index(seq,to_ix):\\n \\\"Returns a list of indices (integers) from a list of words.\\\"\\n return [to_ix.get(word, 0) for word in seq]\\n\\nix_to_aa = dict(zip(np.arange(1, 21), aminoacid_list))\\nix_to_clan = dict(zip(np.arange(0, 4), clan_list))\\n\\ndef index_to_word(ixs,ix_to): \\n \\\"Returns a list of words, given a list of their corresponding indices.\\\"\\n return [ix_to.get(ix, 'X') for ix in ixs]\\n\\ndef prepare_sequence(seq):\\n idxs = word_to_index(seq[0:-1],aa_to_ix)\\n return torch.tensor(idxs, dtype=torch.long)\\n\\ndef prepare_labels(seq):\\n idxs = word_to_index(seq[1:],aa_to_ix)\\n return torch.tensor(idxs, dtype=torch.long)\\n\\nprepare_labels('YCHXXXXX')\\n\\n\",\n \"DGRIPQRDVAAKLVIVMVGLPARGKSYITKKLQRYLSWQQHESRIFNVGNRRRNAAGIKTSARANSGQALDPPVEAATI\\np_loop_gtpase\\n\"\n ],\n [\n \"# set device\\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\\ndevice\",\n \"_____no_output_____\"\n ],\n [\n \"class PositionalEncoding(nn.Module):\\n \\\"\\\"\\\"\\n PositionalEncoding module injects some information about the relative or absolute position of\\n the tokens in the sequence. The positional encodings have the same dimension as the embeddings \\n so that the two can be summed. Here, we use sine and cosine functions of different frequencies.\\n \\\"\\\"\\\"\\n def __init__(self, d_model, dropout=0.1, max_len=5000):\\n super(PositionalEncoding, self).__init__()\\n self.dropout = nn.Dropout(p=dropout)\\n\\n pe = torch.zeros(max_len, d_model)\\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\\n pe[:, 0::2] = torch.sin(position * div_term)\\n pe[:, 1::2] = torch.cos(position * div_term)\\n pe = pe.unsqueeze(0).transpose(0, 1)\\n \\n# pe[:, 0::2] = torch.sin(position * div_term)\\n# pe[:, 1::2] = torch.cos(position * div_term)\\n# pe = pe.unsqueeze(0)\\n \\n self.register_buffer('pe', pe)\\n\\n def forward(self, x):\\n# x = x + self.pe[:x.size(0), :]\\n# print(\\\"x.size() : \\\", x.size())\\n# print(\\\"self.pe.size() :\\\", self.pe[:x.size(0),:,:].size())\\n x = torch.add(x ,Variable(self.pe[:x.size(0),:,:], requires_grad=False))\\n return self.dropout(x)\",\n \"_____no_output_____\"\n ],\n [\n \"\\n \\nclass TransformerModel(nn.Module):\\n\\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\\n super(TransformerModel, self).__init__()\\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\\n self.model_type = 'Transformer'\\n self.src_mask = None\\n self.pos_encoder = PositionalEncoding(ninp)\\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\\n self.encoder = nn.Embedding(ntoken, ninp)\\n self.ninp = ninp\\n self.decoder = nn.Linear(ninp, ntoken)\\n\\n self.init_weights()\\n\\n def _generate_square_subsequent_mask(self, sz):\\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\\n return mask\\n\\n def init_weights(self):\\n initrange = 0.1\\n self.encoder.weight.data.uniform_(-initrange, initrange)\\n self.decoder.bias.data.zero_()\\n self.decoder.weight.data.uniform_(-initrange, initrange)\\n\\n def forward(self, src):\\n if self.src_mask is None or self.src_mask.size(0) != src.size(0):\\n device = src.device\\n mask = self._generate_square_subsequent_mask(src.size(0)).to(device)\\n self.src_mask = mask\\n# print(\\\"src.device: \\\", src.device)\\n src = self.encoder(src) * math.sqrt(self.ninp)\\n# print(\\\"self.encoder(src) size: \\\", src.size())\\n src = self.pos_encoder(src)\\n# print(\\\"elf.pos_encoder(src) size: \\\", src.size())\\n output = self.transformer_encoder(src, self.src_mask)\\n# print(\\\"output size: \\\", output.size())\\n output = self.decoder(output)\\n return output\",\n \"_____no_output_____\"\n ],\n [\n \"ntokens = len(aminoacid_list) + 1 # the size of vocabulary\\nemsize = 12 # embedding dimension\\nnhid = 100 # the dimension of the feedforward network model in nn.TransformerEncoder\\nnlayers = 6 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\\nnhead = 12 # the number of heads in the multiheadattention models\\ndropout = 0.1 # the dropout value\\nmodel = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)\",\n \"_____no_output_____\"\n ],\n [\n \"import time\",\n \"_____no_output_____\"\n ],\n [\n \"criterion = nn.CrossEntropyLoss()\\nlr = 3.0 # learning rate\\noptimizer = torch.optim.SGD(model.parameters(), lr=lr)\\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)\\n\\nmodel.to(device)\\nmodel.train() # Turn on the train mode\",\n \"_____no_output_____\"\n ],\n [\n \"start_time = time.time()\\nprint_every = 1\\nloss_vector = []\\n\\nfor epoch in np.arange(0, df_dev.shape[0]): \\n seq = df_dev.iloc[epoch, 6]\\n sentence_in = prepare_sequence(seq)\\n targets = prepare_labels(seq)\\n# sentence_in = sentence_in.to(device = device)\\n sentence_in = sentence_in.unsqueeze(1).to(device = device)\\n targets = targets.to(device = device)\\n \\n optimizer.zero_grad()\\n output = model(sentence_in)\\n \\n print(\\\"targets size: \\\", targets.size())\\n loss = criterion(output.view(-1, ntokens), targets)\\n loss.backward()\\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\\n optimizer.step()\\n if epoch % print_every == 0:\\n print(f\\\"At Epoch: %.1f\\\"% epoch)\\n print(f\\\"Loss %.4f\\\"% loss)\\n loss_vector.append(loss)\\n break\\n \\n\",\n \"targets size: torch.Size([115])\\nAt Epoch: 0.0\\nLoss 4.3648\\n\"\n ],\n [\n \"start_time = time.time()\\nprint_every = 1000\\n# loss_vector = []\\n\\nfor epoch in np.arange(0, df_dev.shape[0]): \\n seq = df_dev.iloc[epoch, 6]\\n \\n sentence_in = prepare_sequence(seq)\\n targets = prepare_labels(seq)\\n# sentence_in = sentence_in.to(device = device)\\n sentence_in = sentence_in.unsqueeze(1).to(device = device)\\n targets = targets.to(device = device)\\n \\n optimizer.zero_grad()\\n output = model(sentence_in)\\n \\n# print(\\\"targets size: \\\", targets.size())\\n loss = criterion(output.view(-1, ntokens), targets)\\n loss.backward()\\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\\n optimizer.step()\\n if epoch % print_every == 0:\\n print(f\\\"At Epoch: %.1f\\\"% epoch)\\n print(f\\\"Loss %.4f\\\"% loss)\\n elapsed = time.time() - start_time\\n print(f\\\"time elapsed %.4f\\\"% elapsed)\\n torch.save(model.state_dict(), \\\"../data/transformer_encoder_201012.pt\\\")\\n# loss_vector.append(loss)\\n\\n \",\n \"At Epoch: 0.0\\nLoss 8.4750\\ntime elapsed 0.0296\\nAt Epoch: 1000.0\\nLoss 3.0725\\ntime elapsed 23.4438\\nAt Epoch: 2000.0\\nLoss 3.2927\\ntime elapsed 45.1294\\nAt Epoch: 3000.0\\nLoss 2.9190\\ntime elapsed 66.5057\\nAt Epoch: 4000.0\\nLoss 3.0607\\ntime elapsed 87.3455\\nAt Epoch: 5000.0\\nLoss 2.9439\\ntime elapsed 108.5937\\nAt Epoch: 6000.0\\nLoss 3.0892\\ntime elapsed 129.0390\\nAt Epoch: 7000.0\\nLoss 3.0167\\ntime elapsed 149.5365\\nAt Epoch: 8000.0\\nLoss 3.0786\\ntime elapsed 170.1348\\nAt Epoch: 9000.0\\nLoss 2.8547\\ntime elapsed 193.8191\\nAt Epoch: 10000.0\\nLoss 3.0577\\ntime elapsed 215.4009\\nAt Epoch: 11000.0\\nLoss 2.8724\\ntime elapsed 237.3885\\nAt Epoch: 12000.0\\nLoss 2.9954\\ntime elapsed 258.8655\\nAt Epoch: 13000.0\\nLoss 2.9179\\ntime elapsed 279.4901\\nAt Epoch: 14000.0\\nLoss 2.9298\\ntime elapsed 300.0160\\nAt Epoch: 15000.0\\nLoss 2.9484\\ntime elapsed 321.0145\\nAt Epoch: 16000.0\\nLoss 2.8884\\ntime elapsed 342.1072\\nAt Epoch: 17000.0\\nLoss 2.8484\\ntime elapsed 363.1312\\nAt Epoch: 18000.0\\nLoss 2.8564\\ntime elapsed 384.3074\\nAt Epoch: 19000.0\\nLoss 2.8983\\ntime elapsed 404.7843\\nAt Epoch: 20000.0\\nLoss 2.9394\\ntime elapsed 425.4477\\nAt Epoch: 21000.0\\nLoss 2.9441\\ntime elapsed 445.9871\\nAt Epoch: 22000.0\\nLoss 3.0404\\ntime elapsed 467.5490\\nAt Epoch: 23000.0\\nLoss 2.9080\\ntime elapsed 488.7186\\nAt Epoch: 24000.0\\nLoss 2.9070\\ntime elapsed 509.4235\\nAt Epoch: 25000.0\\nLoss 2.9876\\ntime elapsed 530.4340\\nAt Epoch: 26000.0\\nLoss 2.8721\\ntime elapsed 551.2129\\nAt Epoch: 27000.0\\nLoss 2.9223\\ntime elapsed 572.3205\\nAt Epoch: 28000.0\\nLoss 3.0041\\ntime elapsed 593.1280\\nAt Epoch: 29000.0\\nLoss 2.9332\\ntime elapsed 614.2564\\nAt Epoch: 30000.0\\nLoss 2.9536\\ntime elapsed 635.6761\\nAt Epoch: 31000.0\\nLoss 2.9049\\ntime elapsed 656.7652\\nAt Epoch: 32000.0\\nLoss 2.9010\\ntime elapsed 678.9555\\nAt Epoch: 33000.0\\nLoss 3.0228\\ntime elapsed 701.3504\\nAt Epoch: 34000.0\\nLoss 2.8772\\ntime elapsed 723.1934\\nAt Epoch: 35000.0\\nLoss 2.9568\\ntime elapsed 743.8592\\nAt Epoch: 36000.0\\nLoss 3.0205\\ntime elapsed 764.2397\\nAt Epoch: 37000.0\\nLoss 2.9290\\ntime elapsed 785.0874\\nAt Epoch: 38000.0\\nLoss 2.9392\\ntime elapsed 805.7109\\nAt Epoch: 39000.0\\nLoss 2.9803\\ntime elapsed 826.0186\\nAt Epoch: 40000.0\\nLoss 3.3199\\ntime elapsed 846.4928\\nAt Epoch: 41000.0\\nLoss 3.0244\\ntime elapsed 867.0773\\nAt Epoch: 42000.0\\nLoss 2.9066\\ntime elapsed 888.0423\\nAt Epoch: 43000.0\\nLoss 3.0540\\ntime elapsed 908.6464\\nAt Epoch: 44000.0\\nLoss 2.8792\\ntime elapsed 929.9293\\nAt Epoch: 45000.0\\nLoss 2.8411\\ntime elapsed 951.4267\\nAt Epoch: 46000.0\\nLoss 2.8649\\ntime elapsed 974.1426\\nAt Epoch: 47000.0\\nLoss 3.0632\\ntime elapsed 995.3097\\nAt Epoch: 48000.0\\nLoss 2.8977\\ntime elapsed 1016.0418\\nAt Epoch: 49000.0\\nLoss 2.8232\\ntime elapsed 1036.8268\\nAt Epoch: 50000.0\\nLoss 2.9775\\ntime elapsed 1057.6992\\nAt Epoch: 51000.0\\nLoss 2.8774\\ntime elapsed 1078.1217\\nAt Epoch: 52000.0\\nLoss 3.0089\\ntime elapsed 1099.0703\\nAt Epoch: 53000.0\\nLoss 3.0592\\ntime elapsed 1119.7085\\nAt Epoch: 54000.0\\nLoss 2.9713\\ntime elapsed 1140.3441\\nAt Epoch: 55000.0\\nLoss 3.1734\\ntime elapsed 1160.6746\\nAt Epoch: 56000.0\\nLoss 3.0600\\ntime elapsed 1181.5164\\nAt Epoch: 57000.0\\nLoss 3.0501\\ntime elapsed 1203.4995\\nAt Epoch: 58000.0\\nLoss 2.9531\\ntime elapsed 1224.1511\\nAt Epoch: 59000.0\\nLoss 3.1052\\ntime elapsed 1244.6471\\nAt Epoch: 60000.0\\nLoss 3.0830\\ntime elapsed 1267.7761\\nAt Epoch: 61000.0\\nLoss 3.2620\\ntime elapsed 1288.7117\\nAt Epoch: 62000.0\\nLoss 3.0715\\ntime elapsed 1309.3894\\nAt Epoch: 63000.0\\nLoss 2.9098\\ntime elapsed 1331.1903\\nAt Epoch: 64000.0\\nLoss 3.0716\\ntime elapsed 1353.5770\\nAt Epoch: 65000.0\\nLoss 3.0372\\ntime elapsed 1374.6066\\nAt Epoch: 66000.0\\nLoss 2.8375\\ntime elapsed 1395.4844\\nAt Epoch: 67000.0\\nLoss 2.8937\\ntime elapsed 1416.3880\\nAt Epoch: 68000.0\\nLoss 3.0458\\ntime elapsed 1437.1701\\nAt Epoch: 69000.0\\nLoss 2.9471\\ntime elapsed 1457.8177\\nAt Epoch: 70000.0\\nLoss 2.8958\\ntime elapsed 1478.3414\\nAt Epoch: 71000.0\\nLoss 2.8441\\ntime elapsed 1499.9152\\nAt Epoch: 72000.0\\nLoss 3.0838\\ntime elapsed 1520.5460\\nAt Epoch: 73000.0\\nLoss 2.9954\\ntime elapsed 1543.2272\\nAt Epoch: 74000.0\\nLoss 2.8729\\ntime elapsed 1564.0716\\nAt Epoch: 75000.0\\nLoss 2.9577\\ntime elapsed 1584.8576\\nAt Epoch: 76000.0\\nLoss 2.8508\\ntime elapsed 1605.4031\\nAt Epoch: 77000.0\\nLoss 3.0296\\ntime elapsed 1626.2475\\nAt Epoch: 78000.0\\nLoss 3.1241\\ntime elapsed 1646.9719\\nAt Epoch: 79000.0\\nLoss 3.0788\\ntime elapsed 1667.8616\\nAt Epoch: 80000.0\\nLoss 2.9333\\ntime elapsed 1691.5064\\nAt Epoch: 81000.0\\nLoss 2.9634\\ntime elapsed 1714.5915\\nAt Epoch: 82000.0\\nLoss 3.0049\\ntime elapsed 1737.5477\\nAt Epoch: 83000.0\\nLoss 2.9076\\ntime elapsed 1758.6892\\nAt Epoch: 84000.0\\nLoss 3.0611\\ntime elapsed 1779.7354\\nAt Epoch: 85000.0\\nLoss 3.1268\\ntime elapsed 1800.5026\\nAt Epoch: 86000.0\\nLoss 3.0537\\ntime elapsed 1821.2306\\nAt Epoch: 87000.0\\nLoss 3.0465\\ntime elapsed 1842.5739\\nAt Epoch: 88000.0\\nLoss 2.9932\\ntime elapsed 1863.4517\\nAt Epoch: 89000.0\\nLoss 2.9754\\ntime elapsed 1883.9016\\nAt Epoch: 90000.0\\nLoss 2.9752\\ntime elapsed 1904.9715\\nAt Epoch: 91000.0\\nLoss 3.0308\\ntime elapsed 1925.6555\\nAt Epoch: 92000.0\\nLoss 3.4525\\ntime elapsed 1946.2182\\nAt Epoch: 93000.0\\nLoss 2.9289\\ntime elapsed 1966.9721\\nAt Epoch: 94000.0\\nLoss 3.0368\\ntime elapsed 1987.7996\\nAt Epoch: 95000.0\\nLoss 2.7917\\ntime elapsed 2009.1464\\nAt Epoch: 96000.0\\nLoss 2.9012\\ntime elapsed 2033.3650\\nAt Epoch: 97000.0\\nLoss 2.9937\\ntime elapsed 2054.6392\\nAt Epoch: 98000.0\\nLoss 3.0972\\ntime elapsed 2075.4421\\nAt Epoch: 99000.0\\nLoss 3.1194\\ntime elapsed 2096.3616\\nAt Epoch: 100000.0\\nLoss 2.9343\\ntime elapsed 2117.2292\\nAt Epoch: 101000.0\\nLoss 3.0200\\ntime elapsed 2138.0234\\nAt Epoch: 102000.0\\nLoss 3.2507\\ntime elapsed 2158.6976\\nAt Epoch: 103000.0\\nLoss 2.9581\\ntime elapsed 2180.0396\\nAt Epoch: 104000.0\\nLoss 3.2382\\ntime elapsed 2202.0479\\nAt Epoch: 105000.0\\nLoss 3.0344\\ntime elapsed 2223.0734\\nAt Epoch: 106000.0\\nLoss 2.9038\\ntime elapsed 2244.1027\\nAt Epoch: 107000.0\\nLoss 2.9855\\ntime elapsed 2265.6176\\nAt Epoch: 108000.0\\nLoss 3.0731\\ntime elapsed 2286.5905\\nAt Epoch: 109000.0\\nLoss 3.2424\\ntime elapsed 2307.5373\\nAt Epoch: 110000.0\\nLoss 2.9557\\ntime elapsed 2328.5830\\nAt Epoch: 111000.0\\nLoss 2.8459\\ntime elapsed 2349.3629\\nAt Epoch: 112000.0\\nLoss 3.1116\\ntime elapsed 2369.8899\\nAt Epoch: 113000.0\\nLoss 2.8973\\ntime elapsed 2391.1698\\nAt Epoch: 114000.0\\nLoss 3.0910\\ntime elapsed 2412.4004\\nAt Epoch: 115000.0\\nLoss 3.0290\\ntime elapsed 2433.3900\\nAt Epoch: 116000.0\\nLoss 2.8997\\ntime elapsed 2454.9702\\nAt Epoch: 117000.0\\nLoss 3.0588\\ntime elapsed 2476.0218\\nAt Epoch: 118000.0\\nLoss 3.0554\\ntime elapsed 2497.6892\\nAt Epoch: 119000.0\\nLoss 2.9734\\ntime elapsed 2519.9516\\nAt Epoch: 120000.0\\nLoss 3.1741\\ntime elapsed 2544.0268\\nAt Epoch: 121000.0\\nLoss 2.9140\\ntime elapsed 2565.9631\\nAt Epoch: 122000.0\\nLoss 2.8833\\ntime elapsed 2586.9483\\nAt Epoch: 123000.0\\nLoss 3.0268\\ntime elapsed 2607.9265\\nAt Epoch: 124000.0\\nLoss 2.9004\\ntime elapsed 2630.8401\\nAt Epoch: 125000.0\\nLoss 3.0086\\ntime elapsed 2651.4247\\nAt Epoch: 126000.0\\nLoss 3.0224\\ntime elapsed 2672.0303\\nAt Epoch: 127000.0\\nLoss 3.2991\\ntime elapsed 2693.3880\\nAt Epoch: 128000.0\\nLoss 3.1404\\ntime elapsed 2714.1731\\nAt Epoch: 129000.0\\nLoss 3.0332\\ntime elapsed 2736.7471\\nAt Epoch: 130000.0\\nLoss 3.2477\\ntime elapsed 2759.7213\\nAt Epoch: 131000.0\\nLoss 3.1290\\ntime elapsed 2780.8084\\nAt Epoch: 132000.0\\nLoss 2.9785\\ntime elapsed 2802.3767\\nAt Epoch: 133000.0\\nLoss 2.9272\\ntime elapsed 2822.8847\\nAt Epoch: 134000.0\\nLoss 2.9764\\ntime elapsed 2843.6123\\nAt Epoch: 135000.0\\nLoss 3.2959\\ntime elapsed 2864.3198\\nAt Epoch: 136000.0\\nLoss 2.9485\\ntime elapsed 2885.0592\\nAt Epoch: 137000.0\\nLoss 3.1048\\ntime elapsed 2906.0318\\nAt Epoch: 138000.0\\nLoss 2.8635\\ntime elapsed 2926.9689\\nAt Epoch: 139000.0\\nLoss 3.2045\\ntime elapsed 2947.7551\\nAt Epoch: 140000.0\\nLoss 3.0406\\ntime elapsed 2968.8903\\nAt Epoch: 141000.0\\nLoss 2.9509\\ntime elapsed 2989.9148\\nAt Epoch: 142000.0\\nLoss 2.9764\\ntime elapsed 3010.8331\\nAt Epoch: 143000.0\\nLoss 3.0912\\ntime elapsed 3031.6044\\nAt Epoch: 144000.0\\nLoss 3.0150\\ntime elapsed 3052.5004\\nAt Epoch: 145000.0\\nLoss 3.0335\\ntime elapsed 3075.1163\\nAt Epoch: 146000.0\\nLoss 2.9631\\ntime elapsed 3098.1154\\nAt Epoch: 147000.0\\nLoss 3.0329\\ntime elapsed 3121.1964\\nAt Epoch: 148000.0\\nLoss 3.4890\\ntime elapsed 3143.0822\\nAt Epoch: 149000.0\\nLoss 2.9324\\ntime elapsed 3164.6447\\nAt Epoch: 150000.0\\nLoss 2.8988\\ntime elapsed 3188.3173\\nAt Epoch: 151000.0\\nLoss 2.9578\\ntime elapsed 3208.9634\\nAt Epoch: 152000.0\\nLoss 3.0209\\ntime elapsed 3229.6342\\nAt Epoch: 153000.0\\nLoss 3.4023\\ntime elapsed 3251.5514\\nAt Epoch: 154000.0\\nLoss 2.9682\\ntime elapsed 3273.0930\\n\"\n ],\n [\n \"torch.save(model.state_dict(), \\\"../data/transformer_encoder_201012.pt\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"print(\\\"done\\\")\",\n \"done\\n\"\n ],\n [\n \"ntokens = len(aminoacid_list) + 1 # the size of vocabulary\\nemsize = 128 # embedding dimension\\nnhid = 100 # the dimension of the feedforward network model in nn.TransformerEncoder\\nnlayers = 3 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\\nnhead = 12 # the number of heads in the multiheadattention models\\ndropout = 0.1 # the dropout value\\nmodel = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458815,"cells":{"hexsha":{"kind":"string","value":"e7e32b42e2b84f3c6d1d86a6ece8056489abc5c5"},"size":{"kind":"number","value":5338,"string":"5,338"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"03-fundamentals-of-python/04-processing-files.ipynb"},"max_stars_repo_name":{"kind":"string","value":"incidentfrog/rcsc18_lessons"},"max_stars_repo_head_hexsha":{"kind":"string","value":"452a369410fd4bf86b40c081986929a12a29a1f8"},"max_stars_repo_licenses":{"kind":"list like","value":["CC-BY-4.0"],"string":"[\n \"CC-BY-4.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-09-05T08:13:52.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-09-05T08:13:52.000Z"},"max_issues_repo_path":{"kind":"string","value":"03-fundamentals-of-python/04-processing-files.ipynb"},"max_issues_repo_name":{"kind":"string","value":"incidentfrog/rcsc18_lessons"},"max_issues_repo_head_hexsha":{"kind":"string","value":"452a369410fd4bf86b40c081986929a12a29a1f8"},"max_issues_repo_licenses":{"kind":"list like","value":["CC-BY-4.0"],"string":"[\n \"CC-BY-4.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"03-fundamentals-of-python/04-processing-files.ipynb"},"max_forks_repo_name":{"kind":"string","value":"incidentfrog/rcsc18_lessons"},"max_forks_repo_head_hexsha":{"kind":"string","value":"452a369410fd4bf86b40c081986929a12a29a1f8"},"max_forks_repo_licenses":{"kind":"list like","value":["CC-BY-4.0"],"string":"[\n \"CC-BY-4.0\"\n]"},"max_forks_count":{"kind":"number","value":23,"string":"23"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-09-05T08:13:54.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-12-18T16:00:05.000Z"},"avg_line_length":{"kind":"number","value":24.9439252336,"string":"24.943925"},"max_line_length":{"kind":"number","value":110,"string":"110"},"alphanum_fraction":{"kind":"number","value":0.560134882,"string":"0.560135"},"cells":{"kind":"list like","value":[[["# Analyzing Data from Multiple Files","_____no_output_____"],["We now have almost everything we need to process all our data files.\nThe only thing that's missing is a library with a rather unpleasant name:","_____no_output_____"],["The `glob` library contains a function, also called `glob`,\nthat finds files and directories whose names match a pattern.\nWe provide those patterns as strings:\nthe character `*` matches zero or more characters,\nwhile `?` matches any one character.\nWe can use this to get the names of all the CSV files in the current directory:","_____no_output_____"],["As these examples show,\n`glob.glob`'s result is a list of file and directory paths in arbitrary order.\nThis means we can loop over it\nto do something with each filename in turn.\nIn our case,\nthe \"something\" we want to do is generate a set of plots for each file in our inflammation dataset.\nIf we want to start by analyzing just the first three files in alphabetical order, we can use the\n`sorted` built-in function to generate a new sorted list from the `glob.glob` output:","_____no_output_____"],["Sure enough,\nthe maxima of the first two data sets show exactly the same ramp as the first,\nand their minima show the same staircase structure;\na different situation has been revealed in the third dataset,\nwhere the maxima are a bit less regular, but the minima are consistently zero.","_____no_output_____"],["\n
\n
\n

Challenge: Plotting Differences

\n
\n\n\n
\n\n

Plot the difference between the average of the first dataset\nand the average of the second dataset,\ni.e., the difference between the leftmost plot of the first two figures.

\n\n
\n\n
\n","_____no_output_____"],["\n
\n
\n

Solution

\n
\n\n
\n","_____no_output_____"],["\n
\n
\n

Challenge: Generate Composite Statistics

\n
\n\n\n
\n\n

Use each of the files once to generate a dataset containing values averaged over all patients:

\n\n
\n\n
\n","_____no_output_____"],["Then use pyplot to generate average, max, and min for all patients.","_____no_output_____"],["\n
\n
\n

Solution

\n
\n\n
\n","_____no_output_____"],["---\nThe material in this notebook is derived from the Software Carpentry lessons\n&copy; [Software Carpentry](http://software-carpentry.org/) under the terms\nof the [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.","_____no_output_____"]]],"string":"[\n [\n [\n \"# Analyzing Data from Multiple Files\",\n \"_____no_output_____\"\n ],\n [\n \"We now have almost everything we need to process all our data files.\\nThe only thing that's missing is a library with a rather unpleasant name:\",\n \"_____no_output_____\"\n ],\n [\n \"The `glob` library contains a function, also called `glob`,\\nthat finds files and directories whose names match a pattern.\\nWe provide those patterns as strings:\\nthe character `*` matches zero or more characters,\\nwhile `?` matches any one character.\\nWe can use this to get the names of all the CSV files in the current directory:\",\n \"_____no_output_____\"\n ],\n [\n \"As these examples show,\\n`glob.glob`'s result is a list of file and directory paths in arbitrary order.\\nThis means we can loop over it\\nto do something with each filename in turn.\\nIn our case,\\nthe \\\"something\\\" we want to do is generate a set of plots for each file in our inflammation dataset.\\nIf we want to start by analyzing just the first three files in alphabetical order, we can use the\\n`sorted` built-in function to generate a new sorted list from the `glob.glob` output:\",\n \"_____no_output_____\"\n ],\n [\n \"Sure enough,\\nthe maxima of the first two data sets show exactly the same ramp as the first,\\nand their minima show the same staircase structure;\\na different situation has been revealed in the third dataset,\\nwhere the maxima are a bit less regular, but the minima are consistently zero.\",\n \"_____no_output_____\"\n ],\n [\n \"\\n
\\n
\\n

Challenge: Plotting Differences

\\n
\\n\\n\\n
\\n\\n

Plot the difference between the average of the first dataset\\nand the average of the second dataset,\\ni.e., the difference between the leftmost plot of the first two figures.

\\n\\n
\\n\\n
\\n\",\n \"_____no_output_____\"\n ],\n [\n \"\\n
\\n
\\n

Solution

\\n
\\n\\n
\\n\",\n \"_____no_output_____\"\n ],\n [\n \"\\n
\\n
\\n

Challenge: Generate Composite Statistics

\\n
\\n\\n\\n
\\n\\n

Use each of the files once to generate a dataset containing values averaged over all patients:

\\n\\n
\\n\\n
\\n\",\n \"_____no_output_____\"\n ],\n [\n \"Then use pyplot to generate average, max, and min for all patients.\",\n \"_____no_output_____\"\n ],\n [\n \"\\n
\\n
\\n

Solution

\\n
\\n\\n
\\n\",\n \"_____no_output_____\"\n ],\n [\n \"---\\nThe material in this notebook is derived from the Software Carpentry lessons\\n&copy; [Software Carpentry](http://software-carpentry.org/) under the terms\\nof the [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown"],"string":"[\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown","markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458816,"cells":{"hexsha":{"kind":"string","value":"e7e3363d365c76f9273c7d6db0dcc85efe21628b"},"size":{"kind":"number","value":272681,"string":"272,681"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"docs/source/examples/meridional_overturning.ipynb"},"max_stars_repo_name":{"kind":"string","value":"gustavo-marques/mom6-tools"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5c7d95ed5449317529a45c35333ade4a36f3f2b1"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"docs/source/examples/meridional_overturning.ipynb"},"max_issues_repo_name":{"kind":"string","value":"gustavo-marques/mom6-tools"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5c7d95ed5449317529a45c35333ade4a36f3f2b1"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"docs/source/examples/meridional_overturning.ipynb"},"max_forks_repo_name":{"kind":"string","value":"gustavo-marques/mom6-tools"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5c7d95ed5449317529a45c35333ade4a36f3f2b1"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":1036.8098859316,"string":"1,036.809886"},"max_line_length":{"kind":"number","value":139224,"string":"139,224"},"alphanum_fraction":{"kind":"number","value":0.9550170346,"string":"0.955017"},"cells":{"kind":"list like","value":[[["# Meridional Overturning\n\n`mom6_tools.moc` functions for computing and plotting meridional overturning. \n\nThe goal of this notebook is the following:\n\n1) server as an example on to compute a meridional overturning streamfunction (global and Atalntic) from CESM/MOM output; \n\n2) evaluate model experiments by comparing transports against observed estimates;\n\n3) compare model results vs. another model results (TODO).","_____no_output_____"]],[["%matplotlib inline\nimport matplotlib\nimport numpy as np\nimport xarray as xr\n# mom6_tools\nfrom mom6_tools.moc import *\nfrom mom6_tools.m6toolbox import check_time_interval, genBasinMasks \nimport matplotlib.pyplot as plt\n","_____no_output_____"],["# The following parameters must be set accordingly\n######################################################\n# case name - must be changed for each configuration\ncase_name = 'g.c2b6.GNYF.T62_t061.long_run_nuopc.001'\n# Path to the run directory\npath = \"/glade/scratch/gmarques/g.c2b6.GNYF.T62_t061.long_run_nuopc.001/run/\"\n# initial and final years for computing time mean\nyear_start = 80\nyear_end = 90\n# add your name and email address below\nauthor = 'Gustavo Marques (gmarques@ucar.edu)'\n######################################################\n# create an empty class object\nclass args:\n pass\n\nargs.infile = path\nargs.static = 'g.c2b6.GNYF.T62_t061.long_run_nuopc.001.mom6.static.nc'\nargs.monthly = 'g.c2b6.GNYF.T62_t061.long_run_nuopc.001.mom6.hm_*nc'\nargs.year_start = year_start\nargs.year_end = year_end\nargs.case_name = case_name\nargs.label = ''\nargs.savefigs = False","_____no_output_____"],["stream = True\n# mom6 grid\ngrd = MOM6grid(args.infile+args.static)\ndepth = grd.depth_ocean\n# remote Nan's, otherwise genBasinMasks won't work\ndepth[numpy.isnan(depth)] = 0.0\nbasin_code = m6toolbox.genBasinMasks(grd.geolon, grd.geolat, depth)\n\n# load data\nds = xr.open_mfdataset(args.infile+args.monthly,decode_times=False)\n# convert time in years\nds['time'] = ds.time/365.\nti = args.year_start\ntf = args.year_end\n# check if data includes years between ti and tf\ncheck_time_interval(ti,tf,ds)\n\n# create a ndarray subclass\nclass C(numpy.ndarray): pass\n\nif 'vmo' in ds.variables:\n varName = 'vmo'; conversion_factor = 1.e-9\nelif 'vh' in ds.variables:\n varName = 'vh'; conversion_factor = 1.e-6\n if 'zw' in ds.variables: conversion_factor = 1.e-9 # Backwards compatible for when we had wrong units for 'vh'\nelse: raise Exception('Could not find \"vh\" or \"vmo\" in file \"%s\"'%(args.infile+args.static))\n \n\ntmp = np.ma.masked_invalid(ds[varName].sel(time=slice(ti,tf)).mean('time').data)\ntmp = tmp[:].filled(0.)\nVHmod = tmp.view(C)\nVHmod.units = ds[varName].units\n\n\nZmod = m6toolbox.get_z(ds, depth, varName)\n\nif args.case_name != '': case_name = args.case_name + ' ' + args.label\nelse: case_name = rootGroup.title + ' ' + args.label\n","MOM6 grid successfully loaded... \n\n11.16428 64.78855 [391, 434]\n"],["# Global MOC\nm6plot.setFigureSize([16,9],576,debug=False)\naxis = plt.gca()\ncmap = plt.get_cmap('dunnePM')\nz = Zmod.min(axis=-1); psiPlot = MOCpsi(VHmod)*conversion_factor\npsiPlot = 0.5 * (psiPlot[0:-1,:]+psiPlot[1::,:])\n#yy = y[1:,:].max(axis=-1)+0*z\nyy = grd.geolat_c[:,:].max(axis=-1)+0*z\nprint(z.shape, yy.shape, psiPlot.shape)\nci=m6plot.pmCI(0.,40.,5.)\nplotPsi(yy, z, psiPlot, ci, 'Global MOC [Sv]')\nplt.xlabel(r'Latitude [$\\degree$N]')\nplt.suptitle(case_name)\n#findExtrema(yy, z, psiPlot, max_lat=-30.)\n#findExtrema(yy, z, psiPlot, min_lat=25.)\n#findExtrema(yy, z, psiPlot, min_depth=2000., mult=-1.)","(60, 458) (60, 458) (60, 458)\n"],["# Atlantic MOC\nm6plot.setFigureSize([16,9],576,debug=False)\ncmap = plt.get_cmap('dunnePM')\nm = 0*basin_code; m[(basin_code==2) | (basin_code==4) | (basin_code==6) | (basin_code==7) | (basin_code==8)]=1\nci=m6plot.pmCI(0.,22.,2.)\nz = (m*Zmod).min(axis=-1); psiPlot = MOCpsi(VHmod, vmsk=m*numpy.roll(m,-1,axis=-2))*conversion_factor\npsiPlot = 0.5 * (psiPlot[0:-1,:]+psiPlot[1::,:])\n#yy = y[1:,:].max(axis=-1)+0*z\nyy = grd.geolat_c[:,:].max(axis=-1)+0*z\nplotPsi(yy, z, psiPlot, ci, 'Atlantic MOC [Sv]')\nplt.xlabel(r'Latitude [$\\degree$N]')\nplt.suptitle(case_name)\n#findExtrema(yy, z, psiPlot, min_lat=26.5, max_lat=27.) # RAPID\n#findExtrema(yy, z, psiPlot, max_lat=-33.)\n#findExtrema(yy, z, psiPlot)\n#findExtrema(yy, z, psiPlot, min_lat=5.)\n","_____no_output_____"]]],"string":"[\n [\n [\n \"# Meridional Overturning\\n\\n`mom6_tools.moc` functions for computing and plotting meridional overturning. \\n\\nThe goal of this notebook is the following:\\n\\n1) server as an example on to compute a meridional overturning streamfunction (global and Atalntic) from CESM/MOM output; \\n\\n2) evaluate model experiments by comparing transports against observed estimates;\\n\\n3) compare model results vs. another model results (TODO).\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%matplotlib inline\\nimport matplotlib\\nimport numpy as np\\nimport xarray as xr\\n# mom6_tools\\nfrom mom6_tools.moc import *\\nfrom mom6_tools.m6toolbox import check_time_interval, genBasinMasks \\nimport matplotlib.pyplot as plt\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# The following parameters must be set accordingly\\n######################################################\\n# case name - must be changed for each configuration\\ncase_name = 'g.c2b6.GNYF.T62_t061.long_run_nuopc.001'\\n# Path to the run directory\\npath = \\\"/glade/scratch/gmarques/g.c2b6.GNYF.T62_t061.long_run_nuopc.001/run/\\\"\\n# initial and final years for computing time mean\\nyear_start = 80\\nyear_end = 90\\n# add your name and email address below\\nauthor = 'Gustavo Marques (gmarques@ucar.edu)'\\n######################################################\\n# create an empty class object\\nclass args:\\n pass\\n\\nargs.infile = path\\nargs.static = 'g.c2b6.GNYF.T62_t061.long_run_nuopc.001.mom6.static.nc'\\nargs.monthly = 'g.c2b6.GNYF.T62_t061.long_run_nuopc.001.mom6.hm_*nc'\\nargs.year_start = year_start\\nargs.year_end = year_end\\nargs.case_name = case_name\\nargs.label = ''\\nargs.savefigs = False\",\n \"_____no_output_____\"\n ],\n [\n \"stream = True\\n# mom6 grid\\ngrd = MOM6grid(args.infile+args.static)\\ndepth = grd.depth_ocean\\n# remote Nan's, otherwise genBasinMasks won't work\\ndepth[numpy.isnan(depth)] = 0.0\\nbasin_code = m6toolbox.genBasinMasks(grd.geolon, grd.geolat, depth)\\n\\n# load data\\nds = xr.open_mfdataset(args.infile+args.monthly,decode_times=False)\\n# convert time in years\\nds['time'] = ds.time/365.\\nti = args.year_start\\ntf = args.year_end\\n# check if data includes years between ti and tf\\ncheck_time_interval(ti,tf,ds)\\n\\n# create a ndarray subclass\\nclass C(numpy.ndarray): pass\\n\\nif 'vmo' in ds.variables:\\n varName = 'vmo'; conversion_factor = 1.e-9\\nelif 'vh' in ds.variables:\\n varName = 'vh'; conversion_factor = 1.e-6\\n if 'zw' in ds.variables: conversion_factor = 1.e-9 # Backwards compatible for when we had wrong units for 'vh'\\nelse: raise Exception('Could not find \\\"vh\\\" or \\\"vmo\\\" in file \\\"%s\\\"'%(args.infile+args.static))\\n \\n\\ntmp = np.ma.masked_invalid(ds[varName].sel(time=slice(ti,tf)).mean('time').data)\\ntmp = tmp[:].filled(0.)\\nVHmod = tmp.view(C)\\nVHmod.units = ds[varName].units\\n\\n\\nZmod = m6toolbox.get_z(ds, depth, varName)\\n\\nif args.case_name != '': case_name = args.case_name + ' ' + args.label\\nelse: case_name = rootGroup.title + ' ' + args.label\\n\",\n \"MOM6 grid successfully loaded... \\n\\n11.16428 64.78855 [391, 434]\\n\"\n ],\n [\n \"# Global MOC\\nm6plot.setFigureSize([16,9],576,debug=False)\\naxis = plt.gca()\\ncmap = plt.get_cmap('dunnePM')\\nz = Zmod.min(axis=-1); psiPlot = MOCpsi(VHmod)*conversion_factor\\npsiPlot = 0.5 * (psiPlot[0:-1,:]+psiPlot[1::,:])\\n#yy = y[1:,:].max(axis=-1)+0*z\\nyy = grd.geolat_c[:,:].max(axis=-1)+0*z\\nprint(z.shape, yy.shape, psiPlot.shape)\\nci=m6plot.pmCI(0.,40.,5.)\\nplotPsi(yy, z, psiPlot, ci, 'Global MOC [Sv]')\\nplt.xlabel(r'Latitude [$\\\\degree$N]')\\nplt.suptitle(case_name)\\n#findExtrema(yy, z, psiPlot, max_lat=-30.)\\n#findExtrema(yy, z, psiPlot, min_lat=25.)\\n#findExtrema(yy, z, psiPlot, min_depth=2000., mult=-1.)\",\n \"(60, 458) (60, 458) (60, 458)\\n\"\n ],\n [\n \"# Atlantic MOC\\nm6plot.setFigureSize([16,9],576,debug=False)\\ncmap = plt.get_cmap('dunnePM')\\nm = 0*basin_code; m[(basin_code==2) | (basin_code==4) | (basin_code==6) | (basin_code==7) | (basin_code==8)]=1\\nci=m6plot.pmCI(0.,22.,2.)\\nz = (m*Zmod).min(axis=-1); psiPlot = MOCpsi(VHmod, vmsk=m*numpy.roll(m,-1,axis=-2))*conversion_factor\\npsiPlot = 0.5 * (psiPlot[0:-1,:]+psiPlot[1::,:])\\n#yy = y[1:,:].max(axis=-1)+0*z\\nyy = grd.geolat_c[:,:].max(axis=-1)+0*z\\nplotPsi(yy, z, psiPlot, ci, 'Atlantic MOC [Sv]')\\nplt.xlabel(r'Latitude [$\\\\degree$N]')\\nplt.suptitle(case_name)\\n#findExtrema(yy, z, psiPlot, min_lat=26.5, max_lat=27.) # RAPID\\n#findExtrema(yy, z, psiPlot, max_lat=-33.)\\n#findExtrema(yy, z, psiPlot)\\n#findExtrema(yy, z, psiPlot, min_lat=5.)\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code"],"string":"[\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458817,"cells":{"hexsha":{"kind":"string","value":"e7e34d3113355e4704374c6ab32fe88b6def3155"},"size":{"kind":"number","value":30228,"string":"30,228"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"(b)intro 2.ipynb"},"max_stars_repo_name":{"kind":"string","value":"fahimalamabir/scalable_machine_learning_Apache_Spark"},"max_stars_repo_head_hexsha":{"kind":"string","value":"04f2057f2575f5dc5aa4563f6ad8ca7818fd0c52"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"(b)intro 2.ipynb"},"max_issues_repo_name":{"kind":"string","value":"fahimalamabir/scalable_machine_learning_Apache_Spark"},"max_issues_repo_head_hexsha":{"kind":"string","value":"04f2057f2575f5dc5aa4563f6ad8ca7818fd0c52"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"(b)intro 2.ipynb"},"max_forks_repo_name":{"kind":"string","value":"fahimalamabir/scalable_machine_learning_Apache_Spark"},"max_forks_repo_head_hexsha":{"kind":"string","value":"04f2057f2575f5dc5aa4563f6ad8ca7818fd0c52"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":26.1487889273,"string":"26.148789"},"max_line_length":{"kind":"number","value":531,"string":"531"},"alphanum_fraction":{"kind":"number","value":0.4501124785,"string":"0.450112"},"cells":{"kind":"list like","value":[[["try:\n from pyspark import SparkContext, SparkConf\n from pyspark.sql import SparkSession\nexcept ImportError as e:\n printmd('<<<<>>>>')","_____no_output_____"],["sc = SparkContext.getOrCreate(SparkConf().setMaster(\"local[*]\"))\n\nspark = SparkSession \\\n .builder \\\n .getOrCreate()","_____no_output_____"]],[["Mean = $\\frac{1}{n} \\sum_{i=1}^n a_i$","_____no_output_____"]],[["# create a rdd from 0 to 99\nrdd = sc.parallelize(range(100))\nsum_ = rdd.sum()\nn = rdd.count()\nmean = sum_/n\nprint(mean)","49.5\n"]],[["Median
\n(1) sort the list
\n(2) pick the middle element
","_____no_output_____"]],[["rdd.collect()","_____no_output_____"],["rdd.sortBy(lambda x:x).collect()","_____no_output_____"]],[["To access the middle element, we need to access the index.","_____no_output_____"]],[["rdd.sortBy(lambda x:x).zipWithIndex().collect()","_____no_output_____"],["sortedandindexed = rdd.sortBy(lambda x:x).zipWithIndex().map(lambda x:x)\nn = sortedandindexed.count()\nif (n%2 == 1):\n index = (n-1)/2;\n print(sortedandindexed.lookup(index))\nelse:\n index1 = (n/2)-1\n index2 = n/2\n value1 = sortedandindexed.lookup(index1)[0]\n value2 = sortedandindexed.lookup(index2)[0]\n print((value1 + value2)/2)","49.5\n"]],[["Standard Deviation:
\n - tells you how wide the is spread around the mean
\n so if SD is low, all the values should be close to the mean
\n - to calculate it first calculate the mean $\\bar{x}$
\n - SD = $\\sqrt{\\frac{1}{N}\\sum_{i=1}^N(x_i - \\bar{x})^2}$
","_____no_output_____"]],[["from math import sqrt\nsum_ = rdd.sum()\nn = rdd.count()\nmean = sum_/n\nsqrt(rdd.map(lambda x: pow(x-mean,2)).sum()/n)","_____no_output_____"]],[["Skewness
\n- tells us how asymmetric data is spread around the mean
\n- check positive skew, negative skew
\n- Skew = $\\frac{1}{n}\\frac{\\sum_{j=1}^n (x_j- \\bar{x})^3}{\\text{SD}^3}$, x_j= individual value","_____no_output_____"]],[["sd= sqrt(rdd.map(lambda x: pow(x-mean,2)).sum()/n)\nn = float(n) # to round off\nskw = (1/n)*rdd.map(lambda x : pow(x- mean,3)/pow(sd,3)).sum()\nskw","_____no_output_____"]],[["Kurtosis
\n\n- tells us the shape of the data
\n- indicates outlier content within the data
\n- kurt = $\\frac{1}{n}\\frac{\\sum_{j=1}^n (x_j- \\bar{x})^4}{\\text{SD}^4}$, x_j= individual value\n","_____no_output_____"]],[["(1/n)*rdd.map(lambda x : pow(x- mean,4)/pow(sd,4)).sum()\n","_____no_output_____"]],[["Covariance \\& Correlation
\n\n- how two columns interact with each other
\n- how all columns interact with each other
\n- cov(X,Y) = $\\frac{1}{n} \\sum_{i=1}^n (x_i-\\bar{x})(y_i -\\bar{y})$","_____no_output_____"]],[["rddX = sc.parallelize(range(100))\nrddY = sc.parallelize(range(100))","_____no_output_____"],["# to avoid loss of precision use float\nmeanX = rddX.sum()/float(rddX.count())\nmeanY = rddY.sum()/float(rddY.count())","_____no_output_____"],["# since we need to use rddx, rddy same time we need to zip them together\nrddXY = rddX.zip(rddY)\ncovXY = rddXY.map(lambda x:(x[0]-meanX)*(x[1]-meanY)).sum()/rddXY.count()\ncovXY","_____no_output_____"]],[["Correlation\n\n- corr(X,Y) =$ \\frac{\\text{cov(X,Y)}}{SD_X SD_Y}$\n
\n\nMeasure of dependency - Correlation
\n +1 Columns totally correlate
\n 0 columns show no interaction
\n -1 inverse dependency","_____no_output_____"]],[["from math import sqrt\nn = rddXY.count()\nmean = sum_/n\nSDX = sqrt(rdd.map(lambda x: pow(x-meanX,2)).sum()/n)\nSDY = sqrt(rdd.map(lambda y: pow(y-meanY,2)).sum()/n)\ncorrXY = covXY/(SDX *SDY)\ncorrXY","_____no_output_____"],["# corellation matrix in practice\nimport random\nfrom pyspark.mllib.stat import Statistics\ncol1 = sc.parallelize(range(100))\ncol2 = sc.parallelize(range(100,200))\ncol3 = sc.parallelize(list(reversed(range(100))))\ncol4 = sc.parallelize(random.sample(range(100),100))\ndata = col1\ndata.take(5)","_____no_output_____"],["data1 = col1.zip(col2)\ndata1.take(5)","_____no_output_____"]],[["Welcome to exercise one of week two of “Apache Spark for Scalable Machine Learning on BigData”. In this exercise you’ll read a DataFrame in order to perform a simple statistical analysis. Then you’ll rebalance the dataset. No worries, we’ll explain everything to you, let’s get started.\n\nLet’s create a data frame from a remote file by downloading it:\n","_____no_output_____"]],[["# delete files from previous runs\n!rm -f hmp.parquet*\n\n# download the file containing the data in PARQUET format\n!wget https://github.com/IBM/coursera/raw/master/hmp.parquet\n \n# create a dataframe out of it\ndf = spark.read.parquet('hmp.parquet')\n\n# register a corresponding query table\ndf.createOrReplaceTempView('df')","--2020-11-06 02:38:52-- https://github.com/IBM/coursera/raw/master/hmp.parquet\nResolving github.com (github.com)... 140.82.114.3\nConnecting to github.com (github.com)|140.82.114.3|:443... connected.\nHTTP request sent, awaiting response... 301 Moved Permanently\nLocation: https://github.com/IBM/skillsnetwork/raw/master/hmp.parquet [following]\n--2020-11-06 02:38:52-- https://github.com/IBM/skillsnetwork/raw/master/hmp.parquet\nReusing existing connection to github.com:443.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/IBM/skillsnetwork/master/hmp.parquet [following]\n--2020-11-06 02:38:53-- https://raw.githubusercontent.com/IBM/skillsnetwork/master/hmp.parquet\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.52.133\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.52.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 932997 (911K) [application/octet-stream]\nSaving to: ‘hmp.parquet’\n\nhmp.parquet 100%[===================>] 911.13K 3.79MB/s in 0.2s \n\n2020-11-06 02:38:53 (3.79 MB/s) - ‘hmp.parquet’ saved [932997/932997]\n\n"]],[["This is a classical classification data set. One thing we always do during data analysis is checking if the classes are balanced. In other words, if there are more or less the same number of example in each class. Let’s find out by a simple aggregation using SQL.\n","_____no_output_____"]],[["from pyspark.sql.functions import col\ncounts = df.groupBy('class').count().orderBy('count')\ndisplay(counts)","_____no_output_____"],["df.groupBy('class').count().show()","+--------------+-----+\n| class|count|\n+--------------+-----+\n| Use_telephone|15225|\n| Standup_chair|25417|\n| Eat_meat|31236|\n| Getup_bed|45801|\n| Drink_glass|42792|\n| Pour_water|41673|\n| Comb_hair|23504|\n| Walk|92254|\n| Climb_stairs|40258|\n| Sitdown_chair|25036|\n| Liedown_bed|11446|\n|Descend_stairs|15375|\n| Brush_teeth|29829|\n| Eat_soup| 6683|\n+--------------+-----+\n\n"],["spark.sql('select class,count(*) from df group by class').show()","+--------------+--------+\n| class|count(1)|\n+--------------+--------+\n| Use_telephone| 15225|\n| Standup_chair| 25417|\n| Eat_meat| 31236|\n| Getup_bed| 45801|\n| Drink_glass| 42792|\n| Pour_water| 41673|\n| Comb_hair| 23504|\n| Walk| 92254|\n| Climb_stairs| 40258|\n| Sitdown_chair| 25036|\n| Liedown_bed| 11446|\n|Descend_stairs| 15375|\n| Brush_teeth| 29829|\n| Eat_soup| 6683|\n+--------------+--------+\n\n"]],[["This looks nice, but it would be nice if we can aggregate further to obtain some quantitative metrics on the imbalance like, min, max, mean and standard deviation. If we divide max by min we get a measure called minmax ration which tells us something about the relationship between the smallest and largest class. Again, let’s first use SQL for those of you familiar with SQL. Don’t be scared, we’re used nested sub-selects, basically selecting from a result of a SQL query like it was a table. All within on SQL statement.\n","_____no_output_____"]],[["spark.sql('''\n select \n *,\n max/min as minmaxratio -- compute minmaxratio based on previously computed values\n from (\n select \n min(ct) as min, -- compute minimum value of all classes\n max(ct) as max, -- compute maximum value of all classes\n mean(ct) as mean, -- compute mean between all classes\n stddev(ct) as stddev -- compute standard deviation between all classes\n from (\n select\n count(*) as ct -- count the number of rows per class and rename it to ct\n from df -- access the temporary query table called df backed by DataFrame df\n group by class -- aggrecate over class\n )\n ) \n''').show()","+----+-----+------------------+------------------+-----------------+\n| min| max| mean| stddev| minmaxratio|\n+----+-----+------------------+------------------+-----------------+\n|6683|92254|31894.928571428572|21284.893716741157|13.80427951518779|\n+----+-----+------------------+------------------+-----------------+\n\n"]],[["The same query can be expressed using the DataFrame API. Again, don’t be scared. It’s just a sequential expression of transformation steps. You now an choose which syntax you like better.\n","_____no_output_____"]],[["df.show()\ndf.printSchema()","+---+---+---+--------------------+-----------+\n| x| y| z| source| class|\n+---+---+---+--------------------+-----------+\n| 22| 49| 35|Accelerometer-201...|Brush_teeth|\n| 22| 49| 35|Accelerometer-201...|Brush_teeth|\n| 22| 52| 35|Accelerometer-201...|Brush_teeth|\n| 22| 52| 35|Accelerometer-201...|Brush_teeth|\n| 21| 52| 34|Accelerometer-201...|Brush_teeth|\n| 22| 51| 34|Accelerometer-201...|Brush_teeth|\n| 20| 50| 35|Accelerometer-201...|Brush_teeth|\n| 22| 52| 34|Accelerometer-201...|Brush_teeth|\n| 22| 50| 34|Accelerometer-201...|Brush_teeth|\n| 22| 51| 35|Accelerometer-201...|Brush_teeth|\n| 21| 51| 33|Accelerometer-201...|Brush_teeth|\n| 20| 50| 34|Accelerometer-201...|Brush_teeth|\n| 21| 49| 33|Accelerometer-201...|Brush_teeth|\n| 21| 49| 33|Accelerometer-201...|Brush_teeth|\n| 20| 51| 35|Accelerometer-201...|Brush_teeth|\n| 18| 49| 34|Accelerometer-201...|Brush_teeth|\n| 19| 48| 34|Accelerometer-201...|Brush_teeth|\n| 16| 53| 34|Accelerometer-201...|Brush_teeth|\n| 18| 52| 35|Accelerometer-201...|Brush_teeth|\n| 18| 51| 32|Accelerometer-201...|Brush_teeth|\n+---+---+---+--------------------+-----------+\nonly showing top 20 rows\n\nroot\n |-- x: integer (nullable = true)\n |-- y: integer (nullable = true)\n |-- z: integer (nullable = true)\n |-- source: string (nullable = true)\n |-- class: string (nullable = true)\n\n"],["from pyspark.sql.functions import col, min, max, mean, stddev\n\ndf \\\n .groupBy('class') \\\n .count() \\\n .select([ \n min(col(\"count\")).alias('min'), \n max(col(\"count\")).alias('max'), \n mean(col(\"count\")).alias('mean'), \n stddev(col(\"count\")).alias('stddev') \n ]) \\\n .select([\n col('*'),\n (col(\"max\") / col(\"min\")).alias('minmaxratio')\n ]) \\\n .show()\n","+----+-----+------------------+------------------+-----------------+\n| min| max| mean| stddev| minmaxratio|\n+----+-----+------------------+------------------+-----------------+\n|6683|92254|31894.928571428572|21284.893716741157|13.80427951518779|\n+----+-----+------------------+------------------+-----------------+\n\n"]],[["Now it’s time for you to work on the data set. First, please create a table of all classes with the respective counts, but this time, please order the table by the count number, ascending.\n","_____no_output_____"]],[["df1 = df.groupBy('class').count()\ndf1.sort('count',ascending=True).show()","+--------------+-----+\n| class|count|\n+--------------+-----+\n| Eat_soup| 6683|\n| Liedown_bed|11446|\n| Use_telephone|15225|\n|Descend_stairs|15375|\n| Comb_hair|23504|\n| Sitdown_chair|25036|\n| Standup_chair|25417|\n| Brush_teeth|29829|\n| Eat_meat|31236|\n| Climb_stairs|40258|\n| Pour_water|41673|\n| Drink_glass|42792|\n| Getup_bed|45801|\n| Walk|92254|\n+--------------+-----+\n\n"]],[["Pixiedust is a very sophisticated library. It takes care of sorting as well. Please modify the bar chart so that it gets sorted by the number of elements per class, ascending. Hint: It’s an option available in the UI once rendered using the display() function.\n","_____no_output_____"]],[["import pixiedust\nfrom pyspark.sql.functions import col\ncounts = df.groupBy('class').count().orderBy('count')\ndisplay(counts)","_____no_output_____"]],[["Imbalanced classes can cause pain in machine learning. Therefore let’s rebalance. In the flowing we limit the number of elements per class to the amount of the least represented class. This is called undersampling. Other ways of rebalancing can be found here:\n\n[https://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset/](https://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)\n","_____no_output_____"]],[["from pyspark.sql.functions import min\n\n# create a lot of distinct classes from the dataset\nclasses = [row[0] for row in df.select('class').distinct().collect()]\n\n# compute the number of elements of the smallest class in order to limit the number of samples per calss\nmin = df.groupBy('class').count().select(min('count')).first()[0]\n\n# define the result dataframe variable\ndf_balanced = None\n\n# iterate over distinct classes\nfor cls in classes:\n \n # only select examples for the specific class within this iteration\n # shuffle the order of the elements (by setting fraction to 1.0 sample works like shuffle)\n # return only the first n samples\n df_temp = df \\\n .filter(\"class = '\"+cls+\"'\") \\\n .sample(False, 1.0) \\\n .limit(min)\n \n # on first iteration, assing df_temp to empty df_balanced\n if df_balanced == None: \n df_balanced = df_temp\n # afterwards, append vertically\n else:\n df_balanced=df_balanced.union(df_temp)","_____no_output_____"]],[["Please verify, by using the code cell below, if df_balanced has the same number of elements per class. You should get 6683 elements per class.\n","_____no_output_____"]],[["$$$","_____no_output_____"]]],"string":"[\n [\n [\n \"try:\\n from pyspark import SparkContext, SparkConf\\n from pyspark.sql import SparkSession\\nexcept ImportError as e:\\n printmd('<<<<>>>>')\",\n \"_____no_output_____\"\n ],\n [\n \"sc = SparkContext.getOrCreate(SparkConf().setMaster(\\\"local[*]\\\"))\\n\\nspark = SparkSession \\\\\\n .builder \\\\\\n .getOrCreate()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Mean = $\\\\frac{1}{n} \\\\sum_{i=1}^n a_i$\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# create a rdd from 0 to 99\\nrdd = sc.parallelize(range(100))\\nsum_ = rdd.sum()\\nn = rdd.count()\\nmean = sum_/n\\nprint(mean)\",\n \"49.5\\n\"\n ]\n ],\n [\n [\n \"Median
\\n(1) sort the list
\\n(2) pick the middle element
\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"rdd.collect()\",\n \"_____no_output_____\"\n ],\n [\n \"rdd.sortBy(lambda x:x).collect()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"To access the middle element, we need to access the index.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"rdd.sortBy(lambda x:x).zipWithIndex().collect()\",\n \"_____no_output_____\"\n ],\n [\n \"sortedandindexed = rdd.sortBy(lambda x:x).zipWithIndex().map(lambda x:x)\\nn = sortedandindexed.count()\\nif (n%2 == 1):\\n index = (n-1)/2;\\n print(sortedandindexed.lookup(index))\\nelse:\\n index1 = (n/2)-1\\n index2 = n/2\\n value1 = sortedandindexed.lookup(index1)[0]\\n value2 = sortedandindexed.lookup(index2)[0]\\n print((value1 + value2)/2)\",\n \"49.5\\n\"\n ]\n ],\n [\n [\n \"Standard Deviation:
\\n - tells you how wide the is spread around the mean
\\n so if SD is low, all the values should be close to the mean
\\n - to calculate it first calculate the mean $\\\\bar{x}$
\\n - SD = $\\\\sqrt{\\\\frac{1}{N}\\\\sum_{i=1}^N(x_i - \\\\bar{x})^2}$
\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from math import sqrt\\nsum_ = rdd.sum()\\nn = rdd.count()\\nmean = sum_/n\\nsqrt(rdd.map(lambda x: pow(x-mean,2)).sum()/n)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Skewness
\\n- tells us how asymmetric data is spread around the mean
\\n- check positive skew, negative skew
\\n- Skew = $\\\\frac{1}{n}\\\\frac{\\\\sum_{j=1}^n (x_j- \\\\bar{x})^3}{\\\\text{SD}^3}$, x_j= individual value\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"sd= sqrt(rdd.map(lambda x: pow(x-mean,2)).sum()/n)\\nn = float(n) # to round off\\nskw = (1/n)*rdd.map(lambda x : pow(x- mean,3)/pow(sd,3)).sum()\\nskw\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Kurtosis
\\n\\n- tells us the shape of the data
\\n- indicates outlier content within the data
\\n- kurt = $\\\\frac{1}{n}\\\\frac{\\\\sum_{j=1}^n (x_j- \\\\bar{x})^4}{\\\\text{SD}^4}$, x_j= individual value\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"(1/n)*rdd.map(lambda x : pow(x- mean,4)/pow(sd,4)).sum()\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Covariance \\\\& Correlation
\\n\\n- how two columns interact with each other
\\n- how all columns interact with each other
\\n- cov(X,Y) = $\\\\frac{1}{n} \\\\sum_{i=1}^n (x_i-\\\\bar{x})(y_i -\\\\bar{y})$\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"rddX = sc.parallelize(range(100))\\nrddY = sc.parallelize(range(100))\",\n \"_____no_output_____\"\n ],\n [\n \"# to avoid loss of precision use float\\nmeanX = rddX.sum()/float(rddX.count())\\nmeanY = rddY.sum()/float(rddY.count())\",\n \"_____no_output_____\"\n ],\n [\n \"# since we need to use rddx, rddy same time we need to zip them together\\nrddXY = rddX.zip(rddY)\\ncovXY = rddXY.map(lambda x:(x[0]-meanX)*(x[1]-meanY)).sum()/rddXY.count()\\ncovXY\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Correlation\\n\\n- corr(X,Y) =$ \\\\frac{\\\\text{cov(X,Y)}}{SD_X SD_Y}$\\n
\\n\\nMeasure of dependency - Correlation
\\n +1 Columns totally correlate
\\n 0 columns show no interaction
\\n -1 inverse dependency\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from math import sqrt\\nn = rddXY.count()\\nmean = sum_/n\\nSDX = sqrt(rdd.map(lambda x: pow(x-meanX,2)).sum()/n)\\nSDY = sqrt(rdd.map(lambda y: pow(y-meanY,2)).sum()/n)\\ncorrXY = covXY/(SDX *SDY)\\ncorrXY\",\n \"_____no_output_____\"\n ],\n [\n \"# corellation matrix in practice\\nimport random\\nfrom pyspark.mllib.stat import Statistics\\ncol1 = sc.parallelize(range(100))\\ncol2 = sc.parallelize(range(100,200))\\ncol3 = sc.parallelize(list(reversed(range(100))))\\ncol4 = sc.parallelize(random.sample(range(100),100))\\ndata = col1\\ndata.take(5)\",\n \"_____no_output_____\"\n ],\n [\n \"data1 = col1.zip(col2)\\ndata1.take(5)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Welcome to exercise one of week two of “Apache Spark for Scalable Machine Learning on BigData”. In this exercise you’ll read a DataFrame in order to perform a simple statistical analysis. Then you’ll rebalance the dataset. No worries, we’ll explain everything to you, let’s get started.\\n\\nLet’s create a data frame from a remote file by downloading it:\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# delete files from previous runs\\n!rm -f hmp.parquet*\\n\\n# download the file containing the data in PARQUET format\\n!wget https://github.com/IBM/coursera/raw/master/hmp.parquet\\n \\n# create a dataframe out of it\\ndf = spark.read.parquet('hmp.parquet')\\n\\n# register a corresponding query table\\ndf.createOrReplaceTempView('df')\",\n \"--2020-11-06 02:38:52-- https://github.com/IBM/coursera/raw/master/hmp.parquet\\nResolving github.com (github.com)... 140.82.114.3\\nConnecting to github.com (github.com)|140.82.114.3|:443... connected.\\nHTTP request sent, awaiting response... 301 Moved Permanently\\nLocation: https://github.com/IBM/skillsnetwork/raw/master/hmp.parquet [following]\\n--2020-11-06 02:38:52-- https://github.com/IBM/skillsnetwork/raw/master/hmp.parquet\\nReusing existing connection to github.com:443.\\nHTTP request sent, awaiting response... 302 Found\\nLocation: https://raw.githubusercontent.com/IBM/skillsnetwork/master/hmp.parquet [following]\\n--2020-11-06 02:38:53-- https://raw.githubusercontent.com/IBM/skillsnetwork/master/hmp.parquet\\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.52.133\\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.52.133|:443... connected.\\nHTTP request sent, awaiting response... 200 OK\\nLength: 932997 (911K) [application/octet-stream]\\nSaving to: ‘hmp.parquet’\\n\\nhmp.parquet 100%[===================>] 911.13K 3.79MB/s in 0.2s \\n\\n2020-11-06 02:38:53 (3.79 MB/s) - ‘hmp.parquet’ saved [932997/932997]\\n\\n\"\n ]\n ],\n [\n [\n \"This is a classical classification data set. One thing we always do during data analysis is checking if the classes are balanced. In other words, if there are more or less the same number of example in each class. Let’s find out by a simple aggregation using SQL.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from pyspark.sql.functions import col\\ncounts = df.groupBy('class').count().orderBy('count')\\ndisplay(counts)\",\n \"_____no_output_____\"\n ],\n [\n \"df.groupBy('class').count().show()\",\n \"+--------------+-----+\\n| class|count|\\n+--------------+-----+\\n| Use_telephone|15225|\\n| Standup_chair|25417|\\n| Eat_meat|31236|\\n| Getup_bed|45801|\\n| Drink_glass|42792|\\n| Pour_water|41673|\\n| Comb_hair|23504|\\n| Walk|92254|\\n| Climb_stairs|40258|\\n| Sitdown_chair|25036|\\n| Liedown_bed|11446|\\n|Descend_stairs|15375|\\n| Brush_teeth|29829|\\n| Eat_soup| 6683|\\n+--------------+-----+\\n\\n\"\n ],\n [\n \"spark.sql('select class,count(*) from df group by class').show()\",\n \"+--------------+--------+\\n| class|count(1)|\\n+--------------+--------+\\n| Use_telephone| 15225|\\n| Standup_chair| 25417|\\n| Eat_meat| 31236|\\n| Getup_bed| 45801|\\n| Drink_glass| 42792|\\n| Pour_water| 41673|\\n| Comb_hair| 23504|\\n| Walk| 92254|\\n| Climb_stairs| 40258|\\n| Sitdown_chair| 25036|\\n| Liedown_bed| 11446|\\n|Descend_stairs| 15375|\\n| Brush_teeth| 29829|\\n| Eat_soup| 6683|\\n+--------------+--------+\\n\\n\"\n ]\n ],\n [\n [\n \"This looks nice, but it would be nice if we can aggregate further to obtain some quantitative metrics on the imbalance like, min, max, mean and standard deviation. If we divide max by min we get a measure called minmax ration which tells us something about the relationship between the smallest and largest class. Again, let’s first use SQL for those of you familiar with SQL. Don’t be scared, we’re used nested sub-selects, basically selecting from a result of a SQL query like it was a table. All within on SQL statement.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"spark.sql('''\\n select \\n *,\\n max/min as minmaxratio -- compute minmaxratio based on previously computed values\\n from (\\n select \\n min(ct) as min, -- compute minimum value of all classes\\n max(ct) as max, -- compute maximum value of all classes\\n mean(ct) as mean, -- compute mean between all classes\\n stddev(ct) as stddev -- compute standard deviation between all classes\\n from (\\n select\\n count(*) as ct -- count the number of rows per class and rename it to ct\\n from df -- access the temporary query table called df backed by DataFrame df\\n group by class -- aggrecate over class\\n )\\n ) \\n''').show()\",\n \"+----+-----+------------------+------------------+-----------------+\\n| min| max| mean| stddev| minmaxratio|\\n+----+-----+------------------+------------------+-----------------+\\n|6683|92254|31894.928571428572|21284.893716741157|13.80427951518779|\\n+----+-----+------------------+------------------+-----------------+\\n\\n\"\n ]\n ],\n [\n [\n \"The same query can be expressed using the DataFrame API. Again, don’t be scared. It’s just a sequential expression of transformation steps. You now an choose which syntax you like better.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df.show()\\ndf.printSchema()\",\n \"+---+---+---+--------------------+-----------+\\n| x| y| z| source| class|\\n+---+---+---+--------------------+-----------+\\n| 22| 49| 35|Accelerometer-201...|Brush_teeth|\\n| 22| 49| 35|Accelerometer-201...|Brush_teeth|\\n| 22| 52| 35|Accelerometer-201...|Brush_teeth|\\n| 22| 52| 35|Accelerometer-201...|Brush_teeth|\\n| 21| 52| 34|Accelerometer-201...|Brush_teeth|\\n| 22| 51| 34|Accelerometer-201...|Brush_teeth|\\n| 20| 50| 35|Accelerometer-201...|Brush_teeth|\\n| 22| 52| 34|Accelerometer-201...|Brush_teeth|\\n| 22| 50| 34|Accelerometer-201...|Brush_teeth|\\n| 22| 51| 35|Accelerometer-201...|Brush_teeth|\\n| 21| 51| 33|Accelerometer-201...|Brush_teeth|\\n| 20| 50| 34|Accelerometer-201...|Brush_teeth|\\n| 21| 49| 33|Accelerometer-201...|Brush_teeth|\\n| 21| 49| 33|Accelerometer-201...|Brush_teeth|\\n| 20| 51| 35|Accelerometer-201...|Brush_teeth|\\n| 18| 49| 34|Accelerometer-201...|Brush_teeth|\\n| 19| 48| 34|Accelerometer-201...|Brush_teeth|\\n| 16| 53| 34|Accelerometer-201...|Brush_teeth|\\n| 18| 52| 35|Accelerometer-201...|Brush_teeth|\\n| 18| 51| 32|Accelerometer-201...|Brush_teeth|\\n+---+---+---+--------------------+-----------+\\nonly showing top 20 rows\\n\\nroot\\n |-- x: integer (nullable = true)\\n |-- y: integer (nullable = true)\\n |-- z: integer (nullable = true)\\n |-- source: string (nullable = true)\\n |-- class: string (nullable = true)\\n\\n\"\n ],\n [\n \"from pyspark.sql.functions import col, min, max, mean, stddev\\n\\ndf \\\\\\n .groupBy('class') \\\\\\n .count() \\\\\\n .select([ \\n min(col(\\\"count\\\")).alias('min'), \\n max(col(\\\"count\\\")).alias('max'), \\n mean(col(\\\"count\\\")).alias('mean'), \\n stddev(col(\\\"count\\\")).alias('stddev') \\n ]) \\\\\\n .select([\\n col('*'),\\n (col(\\\"max\\\") / col(\\\"min\\\")).alias('minmaxratio')\\n ]) \\\\\\n .show()\\n\",\n \"+----+-----+------------------+------------------+-----------------+\\n| min| max| mean| stddev| minmaxratio|\\n+----+-----+------------------+------------------+-----------------+\\n|6683|92254|31894.928571428572|21284.893716741157|13.80427951518779|\\n+----+-----+------------------+------------------+-----------------+\\n\\n\"\n ]\n ],\n [\n [\n \"Now it’s time for you to work on the data set. First, please create a table of all classes with the respective counts, but this time, please order the table by the count number, ascending.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df1 = df.groupBy('class').count()\\ndf1.sort('count',ascending=True).show()\",\n \"+--------------+-----+\\n| class|count|\\n+--------------+-----+\\n| Eat_soup| 6683|\\n| Liedown_bed|11446|\\n| Use_telephone|15225|\\n|Descend_stairs|15375|\\n| Comb_hair|23504|\\n| Sitdown_chair|25036|\\n| Standup_chair|25417|\\n| Brush_teeth|29829|\\n| Eat_meat|31236|\\n| Climb_stairs|40258|\\n| Pour_water|41673|\\n| Drink_glass|42792|\\n| Getup_bed|45801|\\n| Walk|92254|\\n+--------------+-----+\\n\\n\"\n ]\n ],\n [\n [\n \"Pixiedust is a very sophisticated library. It takes care of sorting as well. Please modify the bar chart so that it gets sorted by the number of elements per class, ascending. Hint: It’s an option available in the UI once rendered using the display() function.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pixiedust\\nfrom pyspark.sql.functions import col\\ncounts = df.groupBy('class').count().orderBy('count')\\ndisplay(counts)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Imbalanced classes can cause pain in machine learning. Therefore let’s rebalance. In the flowing we limit the number of elements per class to the amount of the least represented class. This is called undersampling. Other ways of rebalancing can be found here:\\n\\n[https://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset/](https://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from pyspark.sql.functions import min\\n\\n# create a lot of distinct classes from the dataset\\nclasses = [row[0] for row in df.select('class').distinct().collect()]\\n\\n# compute the number of elements of the smallest class in order to limit the number of samples per calss\\nmin = df.groupBy('class').count().select(min('count')).first()[0]\\n\\n# define the result dataframe variable\\ndf_balanced = None\\n\\n# iterate over distinct classes\\nfor cls in classes:\\n \\n # only select examples for the specific class within this iteration\\n # shuffle the order of the elements (by setting fraction to 1.0 sample works like shuffle)\\n # return only the first n samples\\n df_temp = df \\\\\\n .filter(\\\"class = '\\\"+cls+\\\"'\\\") \\\\\\n .sample(False, 1.0) \\\\\\n .limit(min)\\n \\n # on first iteration, assing df_temp to empty df_balanced\\n if df_balanced == None: \\n df_balanced = df_temp\\n # afterwards, append vertically\\n else:\\n df_balanced=df_balanced.union(df_temp)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"Please verify, by using the code cell below, if df_balanced has the same number of elements per class. You should get 6683 elements per class.\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"$$$\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"],["markdown"],["code"]],"string":"[\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ]\n]"}}},{"rowIdx":1458818,"cells":{"hexsha":{"kind":"string","value":"e7e3778f6cf9a740cbd15c4985d5178b19d6d5ba"},"size":{"kind":"number","value":35627,"string":"35,627"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"NLP Basics.ipynb"},"max_stars_repo_name":{"kind":"string","value":"SaiAdityaGarlapati/nlp-peronsal-archive"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b197123f4e4f93c6dd2e06d4cacf56744a7bd259"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"NLP Basics.ipynb"},"max_issues_repo_name":{"kind":"string","value":"SaiAdityaGarlapati/nlp-peronsal-archive"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b197123f4e4f93c6dd2e06d4cacf56744a7bd259"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"NLP Basics.ipynb"},"max_forks_repo_name":{"kind":"string","value":"SaiAdityaGarlapati/nlp-peronsal-archive"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b197123f4e4f93c6dd2e06d4cacf56744a7bd259"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":37.5416227608,"string":"37.541623"},"max_line_length":{"kind":"number","value":1535,"string":"1,535"},"alphanum_fraction":{"kind":"number","value":0.5824795801,"string":"0.58248"},"cells":{"kind":"list like","value":[[["#!pip install nltk","Collecting nltk\n Downloading https://files.pythonhosted.org/packages/6f/ed/9c755d357d33bc1931e157f537721efb5b88d2c583fe593cc09603076cc3/nltk-3.4.zip (1.4MB)\nRequirement already satisfied: six in c:\\users\\aditya\\anaconda3\\envs\\tensor_flow\\lib\\site-packages (from nltk) (1.12.0)\nCollecting singledispatch (from nltk)\n Downloading https://files.pythonhosted.org/packages/c5/10/369f50bcd4621b263927b0a1519987a04383d4a98fb10438042ad410cf88/singledispatch-3.4.0.3-py2.py3-none-any.whl\nBuilding wheels for collected packages: nltk\n Building wheel for nltk (setup.py): started\n Building wheel for nltk (setup.py): finished with status 'done'\n Stored in directory: C:\\Users\\Aditya\\AppData\\Local\\pip\\Cache\\wheels\\4b\\c8\\24\\b2343664bcceb7147efeb21c0b23703a05b23fcfeaceaa2a1e\nSuccessfully built nltk\nInstalling collected packages: singledispatch, nltk\nSuccessfully installed nltk-3.4 singledispatch-3.4.0.3\n"]],[["Importing NLTK packages\n","_____no_output_____"]],[["import nltk\nimport pandas as pd","_____no_output_____"],["restuarant = pd.read_csv(\"User_restaurants_reviews.csv\")","_____no_output_____"],["restuarant.head()","_____no_output_____"],["from nltk.tokenize import sent_tokenize, word_tokenize \nexample_text = restuarant[\"Review\"][1]\nprint(example_text)","I learned that if an electric slicer is used the blade becomes hot enough to start to cook the prosciutto.\n"],["nltk.download('stopwords')","[nltk_data] Downloading package stopwords to\n[nltk_data] C:\\Users\\Aditya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"]],[["Importing stopwords and filtering data using list comprehension","_____no_output_____"]],[["from nltk.corpus import stopwords \nstop_words = set(stopwords.words('english')) ##Selecting the stop words we want\nprint(len(stop_words))\nprint(stop_words)","179\n{'have', 'didn', 'below', 'herself', 'my', 'the', 'when', 'y', 'for', 'how', 'had', 'because', 'between', 'some', 'this', 'themselves', 'a', 'ours', \"you're\", 'shan', 'nor', 'own', \"doesn't\", 'will', 'so', 'mustn', 'same', 'over', 'she', 'doing', 'mightn', 's', 'during', 'we', 'them', \"wasn't\", 'did', 'after', 'why', \"don't\", 'himself', 'yourselves', 've', 'itself', 'into', \"wouldn't\", 'haven', 'now', 'against', 'weren', 'just', 'once', \"you've\", \"aren't\", 'not', 'all', 'be', 'him', 'ma', \"needn't\", 'having', \"hadn't\", 'been', 'yourself', 'these', 'were', 'his', 're', \"haven't\", 'more', 'll', 'theirs', 'no', 'before', 'on', 'only', 'couldn', 'can', 't', 'while', \"couldn't\", 'was', 'from', 'me', 'here', 'hadn', 'their', 'who', \"you'll\", 'do', 'm', 'any', 'about', 'if', 'what', \"mustn't\", 'of', 'has', 'further', 'wouldn', 'aren', 'wasn', 'by', 'hasn', 'very', \"hasn't\", \"weren't\", 'is', 'hers', \"shouldn't\", 'it', \"didn't\", 'whom', 'that', 'again', \"that'll\", 'being', 'those', 'too', 'i', 'he', 'you', 'yours', 'off', 'your', 'in', 'out', 'as', 'to', 'most', 'isn', 'they', 'other', 'and', 'shouldn', 'd', 'ourselves', 'its', 'up', 'or', \"shan't\", 'few', 'above', \"won't\", 'which', \"mightn't\", 'down', 'where', 'does', 'until', \"she's\", 'with', 'each', 'o', 'needn', \"it's\", \"isn't\", 'than', 'then', 'should', 'her', 'through', 'at', 'doesn', 'am', 'but', 'under', \"you'd\", 'don', 'are', 'ain', 'such', 'both', 'won', \"should've\", 'there', 'an', 'our', 'myself'}\n"],["nltk.download('punkt')","[nltk_data] Downloading package punkt to\n[nltk_data] C:\\Users\\Aditya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Unzipping tokenizers\\punkt.zip.\n"],["word_tokens = word_tokenize(example_text)\nprint(word_tokens)","['I', 'learned', 'that', 'if', 'an', 'electric', 'slicer', 'is', 'used', 'the', 'blade', 'becomes', 'hot', 'enough', 'to', 'start', 'to', 'cook', 'the', 'prosciutto', '.']\n"],["filtered_sentence = [word for word in word_tokens if not word in stop_words] \nprint(filtered_sentence)","['I', 'learned', 'electric', 'slicer', 'used', 'blade', 'becomes', 'hot', 'enough', 'start', 'cook', 'prosciutto', '.']\n"]],[["Stemming the sentence","_____no_output_____"]],[["from nltk.stem import PorterStemmer \nstemmer = PorterStemmer()","_____no_output_____"],["stem_tokens=[stemmer.stem(word) for word in word_tokens]\nprint(stem_tokens)","['I', 'learn', 'that', 'if', 'an', 'electr', 'slicer', 'is', 'use', 'the', 'blade', 'becom', 'hot', 'enough', 'to', 'start', 'to', 'cook', 'the', 'prosciutto', '.']\n"]],[["Comparing the stemmed sentence using jaccard similarity","_____no_output_____"]],[["from sklearn.metrics import jaccard_similarity_score\n\nscore = jaccard_similarity_score(word_tokens,stem_tokens)\nprint(score)","0.8095238095238095\n"],["nltk.download('averaged_perceptron_tagger')","[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] C:\\Users\\Aditya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Unzipping taggers\\averaged_perceptron_tagger.zip.\n"],["#Write a function to get all the possible POS tags of NLTK?\ntext = word_tokenize(\"And then therefore it was something completely different\")\nnltk.pos_tag(text)","_____no_output_____"],["nltk.download('tagsets')","[nltk_data] Downloading package tagsets to\n[nltk_data] C:\\Users\\Aditya\\AppData\\Roaming\\nltk_data...\n[nltk_data] Unzipping help\\tagsets.zip.\n"],["def all_pos_tags():\n print(nltk.help.upenn_tagset())\n\nall_pos_tags()","$: dollar\n $ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$\n'': closing quotation mark\n ' ''\n(: opening parenthesis\n ( [ {\n): closing parenthesis\n ) ] }\n,: comma\n ,\n--: dash\n --\n.: sentence terminator\n . ! ?\n:: colon or ellipsis\n : ; ...\nCC: conjunction, coordinating\n & 'n and both but either et for less minus neither nor or plus so\n therefore times v. versus vs. whether yet\nCD: numeral, cardinal\n mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-\n seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025\n fifteen 271,124 dozen quintillion DM2,000 ...\nDT: determiner\n all an another any both del each either every half la many much nary\n neither no some such that the them these this those\nEX: existential there\n there\nFW: foreign word\n gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous\n lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte\n terram fiche oui corporis ...\nIN: preposition or conjunction, subordinating\n astride among uppon whether out inside pro despite on by throughout\n below within for towards near behind atop around if like until below\n next into if beside ...\nJJ: adjective or numeral, ordinal\n third ill-mannered pre-war regrettable oiled calamitous first separable\n ectoplasmic battery-powered participatory fourth still-to-be-named\n multilingual multi-disciplinary ...\nJJR: adjective, comparative\n bleaker braver breezier briefer brighter brisker broader bumper busier\n calmer cheaper choosier cleaner clearer closer colder commoner costlier\n cozier creamier crunchier cuter ...\nJJS: adjective, superlative\n calmest cheapest choicest classiest cleanest clearest closest commonest\n corniest costliest crassest creepiest crudest cutest darkest deadliest\n dearest deepest densest dinkiest ...\nLS: list item marker\n A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005\n SP-44007 Second Third Three Two * a b c d first five four one six three\n two\nMD: modal auxiliary\n can cannot could couldn't dare may might must need ought shall should\n shouldn't will would\nNN: noun, common, singular or mass\n common-carrier cabbage knuckle-duster Casino afghan shed thermostat\n investment slide humour falloff slick wind hyena override subhumanity\n machinist ...\nNNP: noun, proper, singular\n Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos\n Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA\n Shannon A.K.C. Meltex Liverpool ...\nNNPS: noun, proper, plural\n Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists\n Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques\n Apache Apaches Apocrypha ...\nNNS: noun, common, plural\n undergraduates scotches bric-a-brac products bodyguards facets coasts\n divestitures storehouses designs clubs fragrances averages\n subjectivists apprehensions muses factory-jobs ...\nPDT: pre-determiner\n all both half many quite such sure this\nPOS: genitive marker\n ' 's\nPRP: pronoun, personal\n hers herself him himself hisself it itself me myself one oneself ours\n ourselves ownself self she thee theirs them themselves they thou thy us\nPRP$: pronoun, possessive\n her his mine my our ours their thy your\nRB: adverb\n occasionally unabatingly maddeningly adventurously professedly\n stirringly prominently technologically magisterially predominately\n swiftly fiscally pitilessly ...\nRBR: adverb, comparative\n further gloomier grander graver greater grimmer harder harsher\n healthier heavier higher however larger later leaner lengthier less-\n perfectly lesser lonelier longer louder lower more ...\nRBS: adverb, superlative\n best biggest bluntest earliest farthest first furthest hardest\n heartiest highest largest least less most nearest second tightest worst\nRP: particle\n aboard about across along apart around aside at away back before behind\n by crop down ever fast for forth from go high i.e. in into just later\n low more off on open out over per pie raising start teeth that through\n under unto up up-pp upon whole with you\nSYM: symbol\n % & ' '' ''. ) ). * + ,. < = > @ A[fj] U.S U.S.S.R * ** ***\nTO: \"to\" as preposition or infinitive marker\n to\nUH: interjection\n Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen\n huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly\n man baby diddle hush sonuvabitch ...\nVB: verb, base form\n ask assemble assess assign assume atone attention avoid bake balkanize\n bank begin behold believe bend benefit bevel beware bless boil bomb\n boost brace break bring broil brush build ...\nVBD: verb, past tense\n dipped pleaded swiped regummed soaked tidied convened halted registered\n cushioned exacted snubbed strode aimed adopted belied figgered\n speculated wore appreciated contemplated ...\nVBG: verb, present participle or gerund\n telegraphing stirring focusing angering judging stalling lactating\n hankerin' alleging veering capping approaching traveling besieging\n encrypting interrupting erasing wincing ...\nVBN: verb, past participle\n multihulled dilapidated aerosolized chaired languished panelized used\n experimented flourished imitated reunifed factored condensed sheared\n unsettled primed dubbed desired ...\nVBP: verb, present tense, not 3rd person singular\n predominate wrap resort sue twist spill cure lengthen brush terminate\n appear tend stray glisten obtain comprise detest tease attract\n emphasize mold postpone sever return wag ...\nVBZ: verb, present tense, 3rd person singular\n bases reconstructs marks mixes displeases seals carps weaves snatches\n slumps stretches authorizes smolders pictures emerges stockpiles\n seduces fizzes uses bolsters slaps speaks pleads ...\nWDT: WH-determiner\n that what whatever which whichever\nWP: WH-pronoun\n that what whatever whatsoever which who whom whosoever\nWP$: WH-pronoun, possessive\n whose\nWRB: Wh-adverb\n how however whence whenever where whereby whereever wherein whereof why\n``: opening quotation mark\n ` ``\nNone\n"],["#Write a function to remove punctuation in NLTK\n\ndef remove_punctuation(s):\n words = nltk.word_tokenize(s)\n words=[word.lower() for word in words if word.isalpha()]\n print(words)\nstr1 = restuarant[\"Review\"][12]\nremove_punctuation(str1)","['now', 'i', 'am', 'getting', 'angry', 'and', 'i', 'want', 'my', 'damn', 'pho']\n"],["#Write a function to remove stop words in NLTK\n\ndef remove_stop_words(s):\n word_tokens = word_tokenize(s)\n print(word_tokens)\n filtered_sentence = [word for word in word_tokens if not word in stop_words] \n print(filtered_sentence)\nstr1 = restuarant[\"Review\"][20]\nremove_stop_words(str1)","['That', 'was', \"n't\", 'even', 'all', 'that', 'great', 'to', 'begin', 'with', '?']\n['That', \"n't\", 'even', 'great', 'begin', '?']\n"],["#Write a function to tokenise a sentence in NLTK\n\ndef tokenize_sentence(s):\n word_tokens = word_tokenize(s)\n print(word_tokens)\nstr1 = restuarant[\"Review\"][20]\ntokenize_sentence(str1)","['That', 'was', \"n't\", 'even', 'all', 'that', 'great', 'to', 'begin', 'with', '?']\n"],["Write a function to check whether the word is a German word or not? https://stackoverflow.com/questions/3788870/how-to-check-if-a-word-is-an-english-word-with-python\n\nWrite a function to get the human names from the text below: President Abraham Lincoln suspended the writ of habeas corpus in the Civil War. President Franklin D. Roosevelt claimed emergency powers to fight the Great Depression and World War II. President George W. Bush adopted an expansive concept of White House power after 9/11. President Barack Obama used executive action to shield some undocumented immigrants from deportation.\n\nWrite a function to create a word cloud using Python (with or without NLTK)","_____no_output_____"]],[["#jupyter kernelspec install-self --user\n1. Remove stop words from the content by using NLTK English stop words\n\n2. Get the stem by using Stemming\n\n3. Get the Similarity between two strings by using Jaccard Similarity\n\n4. Write a function to get all the possible POS tags of NLTK?\n\n5. Write a function to check whether the word is a German word or not?\nhttps://stackoverflow.com/questions/3788870/how-to-check-if-a-word-is-an-english-word-with-python\n\n6. Write a function to remove punctuation in NLTK\n\n7. Write a function to remove stop words using NLTK\n\n8. Write a function to tokenize a single sentence using NLTK\n\n9. Write a function to get the human names from the text below:\nPresident Abraham Lincoln suspended the writ of habeas corpus in the Civil War. President Franklin D. Roosevelt claimed emergency powers to fight the Great Depression and World War II. President George W. Bush adopted an expansive concept of White House power after 9/11. President Barack Obama used executive action to shield some undocumented immigrants from deportation.\n\n10. Write a function to create a word cloud using Python (with or without NLTK)\n\n11. How to get alpha numeric characters as tokens in NLTK\n\n12. How to remove all punctuation marks and non-alpha numeric characters?\n\n13. How to create a new corpus with NLTK?\n\n14. How to change the NLTK Download directory in Code?\n\n15. Show a small sample with pyStatParser\n\n16. How to get phrasses form text entries?\n\n17. How to use Chunk Extraction in NLTK?\n\n18. UnicodeDecodeError: ‘ascii’ codec can’t decode byte 0xcb in position 0: ordinal not in range(128)\nFix this issue\n\n19. Generate tag from text content\n\n20. Show a simple NLTK sample using Stanford NER Algorithm\n\n21. How to tokenize a string in NLTK?\n\n22. How to replace 1,2,3.. with “1st, 2nd, 3rd”\nHow to generate strings like “1st, 2nd, 3rd ..”\n\n23. How to extract number from text:\nHow to get the price from Kijiji or Craiglist content\n\n24. How to classify documents into categories\n\n25. Identify the language from the text\n\n26. Check grammar in the sentence by using Python\n\n27. Write a simple example to show dependency parsing in NLTK?\n\n28. Identify place in this sentence\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\nHe has a brother, Sadiki, and a sister, Sherine.\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\n\n29. Find all cities in this page:\n\n30. Convert “spamming” to “spam” by using Lemmatizer\n\n31. Write a simple code to show Perceptron tagger\n\n32. Write an example code to show PunktSentenceTokenizer\n\n33. Convert past tense “gave” to present tense “give” by using NLTK\n\n34. Write a code to show WordNetLemmatizer\n\n35. Write a code show MultiNomial Naive Bayes and NLTK\n\n36. Write an example to show NLTK Collocation\n\n37. Identify Gender from the given sentence by using NLTK\nKelly and John went to meet Ryan and Jenni. But Jenni was not there when they reached the place.\n\n38. Write a code showcase FreqDist in python\n\n39. Do a sentiment analysis on thie sentence\nI love this sandwich\n\n40. Collect nouns from this sentence\nI am John from Toronto\n\n41. Compute N Grams in this sentence\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\nHe has a brother, Sadiki, and a sister, Sherine.\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\n\n42. Write a code to use WordNik\n\n43. Find meaning of “Dunk” by using Cambring API\nhttps://dictionary-api.cambridge.org/\n\n44. How to count the frequency of bigram?\nUse the sentence below\nI love Canada . I am so in love with Canada . Canada is great . samsung is great . I really really love Canadian cities. America can never beat Canada . Canada is better than America\n\n45. Count the verbs in this sentence:\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\nHe has a brother, Sadiki, and a sister, Sherine.\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\n\n46. Show a simple example using ANTLR\nhttps://www.antlr.org/\n\n47. Simple example to show Latent Semantic Modeling in NLTK\n\n48. Show an exmple for Singular Value Decomposition\n\n49. Use LDA in NLTK\n\n50. Show a simple example using NLTK GAE\nhttps://github.com/rutherford/nltk-gae\n\n51. Show a simple exaple using Penn Treebank\n\n52. Do a simple Sentiment Analysis with NLTK and Naive Bayes\n\n53. Write a code to get top terms with TF-IDF score\n\n54. Compare two sentences\n\n55. Find the probability of a word in a given sentence\n\n56. Compare “car” and “äutomobile” by using Wordnet\n\n57. Check the frequency of similar words\n\n58. Show an example using MaltParser\n\n59. Identify the subject of the sentence\n\n60. How to extract word from Synset? Show an example\n\n61. Generate sentences by using\n\n62. Print random sentences like in Lorem Ipsum\n\n63. Count ngrams in the sentence below:\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\nHe has a brother, Sadiki, and a sister, Sherine.\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\n\n64. Use MaltParser in Python\n\n65. Generate tags on any celebrity’s tweets?\n\n66. Write a code to show TF-IDF\n\n67. Classify moview reviews as “good” or “bad” by using NLTK\n\n68. Show a sample for Alignment model in NLTK\n\n69. How to save an alignment model in NLTK?\n\n70. Find verbs in the given sentence\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\nHe has a brother, Sadiki, and a sister, Sherine.\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\n\n71. Get Named Entities from the sentence\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\nHe has a brother, Sadiki, and a sister, Sherine.\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\n\n72. Write a code to use Vader Sentiment Analyzer\n\n73. Generat Random text in Python\n\n74. Find the cosine similarity of two sentences\nJulie loves me more than Linda loves me\nJane likes me more than Julie loves me\n\n75. Show an example using ViterbiParser\n\n76. Show a simple sentiment analysis by using Pointwise Mutual Information\n\n77. Use NER to find persons\n\n78. How to parse multiple sentences using MaltParser?\n\n79. Find the tense of the sentence\n\n80. How to calculate BLEU score for a sentence\n\n81. How to find given word is singular or not?\n\n82. How to identify contractions in a given sentence\n\n83. Find the similarity between “cheap” and “low price” by using NLTK\n\n84. In the given sentence “John” spelled wrong as “Jhon”. How to find biblical names and fix these typos?\n\n85. Show an exmple to use Metaphones in NLTK or Python\n\n86. Show an example to use FuzzyWuzzy\n\n87. How to use Stanford Relation Extractor?\n\n88. Show an example to find the named entities\n\n89. Find the Sentiment by using SentiWordNet\n\n90. Create a custom Corpus by using NLTK?\n\n91. Show an example to extract relationships using NLTK?\n\n92. Fix this code issue\n\n93. Identify short form in the sentence by using NLTK\n“ty. U did an awesome job. However, It would b gr8 If you type w/o short forms”\n\n94. How to auto label the given texts\n\n95. Identify food names in the given sentence\nI had some Chips and Turkey for the lunch. Later I had some ice cream and rice in the evening.\n\n96. Find the specific phrase by using NLTK Regex\nThe pizza was awesome and brilliant\n\n97. Convert these lines to two sentences by using Sentence tokenizer?\nFig. 2 shows a U.S.A. map. However, this is not exactly right\n\n98. How to convert this positive sentence to negative\nAnna is a great girl and she learn things quickly.\n\n99. Find the general synonyms\n\n100. Show an example to extract useful sentences by using NLTK\n","_____no_output_____"]]],"string":"[\n [\n [\n \"#!pip install nltk\",\n \"Collecting nltk\\n Downloading https://files.pythonhosted.org/packages/6f/ed/9c755d357d33bc1931e157f537721efb5b88d2c583fe593cc09603076cc3/nltk-3.4.zip (1.4MB)\\nRequirement already satisfied: six in c:\\\\users\\\\aditya\\\\anaconda3\\\\envs\\\\tensor_flow\\\\lib\\\\site-packages (from nltk) (1.12.0)\\nCollecting singledispatch (from nltk)\\n Downloading https://files.pythonhosted.org/packages/c5/10/369f50bcd4621b263927b0a1519987a04383d4a98fb10438042ad410cf88/singledispatch-3.4.0.3-py2.py3-none-any.whl\\nBuilding wheels for collected packages: nltk\\n Building wheel for nltk (setup.py): started\\n Building wheel for nltk (setup.py): finished with status 'done'\\n Stored in directory: C:\\\\Users\\\\Aditya\\\\AppData\\\\Local\\\\pip\\\\Cache\\\\wheels\\\\4b\\\\c8\\\\24\\\\b2343664bcceb7147efeb21c0b23703a05b23fcfeaceaa2a1e\\nSuccessfully built nltk\\nInstalling collected packages: singledispatch, nltk\\nSuccessfully installed nltk-3.4 singledispatch-3.4.0.3\\n\"\n ]\n ],\n [\n [\n \"Importing NLTK packages\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import nltk\\nimport pandas as pd\",\n \"_____no_output_____\"\n ],\n [\n \"restuarant = pd.read_csv(\\\"User_restaurants_reviews.csv\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"restuarant.head()\",\n \"_____no_output_____\"\n ],\n [\n \"from nltk.tokenize import sent_tokenize, word_tokenize \\nexample_text = restuarant[\\\"Review\\\"][1]\\nprint(example_text)\",\n \"I learned that if an electric slicer is used the blade becomes hot enough to start to cook the prosciutto.\\n\"\n ],\n [\n \"nltk.download('stopwords')\",\n \"[nltk_data] Downloading package stopwords to\\n[nltk_data] C:\\\\Users\\\\Aditya\\\\AppData\\\\Roaming\\\\nltk_data...\\n[nltk_data] Package stopwords is already up-to-date!\\n\"\n ]\n ],\n [\n [\n \"Importing stopwords and filtering data using list comprehension\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from nltk.corpus import stopwords \\nstop_words = set(stopwords.words('english')) ##Selecting the stop words we want\\nprint(len(stop_words))\\nprint(stop_words)\",\n \"179\\n{'have', 'didn', 'below', 'herself', 'my', 'the', 'when', 'y', 'for', 'how', 'had', 'because', 'between', 'some', 'this', 'themselves', 'a', 'ours', \\\"you're\\\", 'shan', 'nor', 'own', \\\"doesn't\\\", 'will', 'so', 'mustn', 'same', 'over', 'she', 'doing', 'mightn', 's', 'during', 'we', 'them', \\\"wasn't\\\", 'did', 'after', 'why', \\\"don't\\\", 'himself', 'yourselves', 've', 'itself', 'into', \\\"wouldn't\\\", 'haven', 'now', 'against', 'weren', 'just', 'once', \\\"you've\\\", \\\"aren't\\\", 'not', 'all', 'be', 'him', 'ma', \\\"needn't\\\", 'having', \\\"hadn't\\\", 'been', 'yourself', 'these', 'were', 'his', 're', \\\"haven't\\\", 'more', 'll', 'theirs', 'no', 'before', 'on', 'only', 'couldn', 'can', 't', 'while', \\\"couldn't\\\", 'was', 'from', 'me', 'here', 'hadn', 'their', 'who', \\\"you'll\\\", 'do', 'm', 'any', 'about', 'if', 'what', \\\"mustn't\\\", 'of', 'has', 'further', 'wouldn', 'aren', 'wasn', 'by', 'hasn', 'very', \\\"hasn't\\\", \\\"weren't\\\", 'is', 'hers', \\\"shouldn't\\\", 'it', \\\"didn't\\\", 'whom', 'that', 'again', \\\"that'll\\\", 'being', 'those', 'too', 'i', 'he', 'you', 'yours', 'off', 'your', 'in', 'out', 'as', 'to', 'most', 'isn', 'they', 'other', 'and', 'shouldn', 'd', 'ourselves', 'its', 'up', 'or', \\\"shan't\\\", 'few', 'above', \\\"won't\\\", 'which', \\\"mightn't\\\", 'down', 'where', 'does', 'until', \\\"she's\\\", 'with', 'each', 'o', 'needn', \\\"it's\\\", \\\"isn't\\\", 'than', 'then', 'should', 'her', 'through', 'at', 'doesn', 'am', 'but', 'under', \\\"you'd\\\", 'don', 'are', 'ain', 'such', 'both', 'won', \\\"should've\\\", 'there', 'an', 'our', 'myself'}\\n\"\n ],\n [\n \"nltk.download('punkt')\",\n \"[nltk_data] Downloading package punkt to\\n[nltk_data] C:\\\\Users\\\\Aditya\\\\AppData\\\\Roaming\\\\nltk_data...\\n[nltk_data] Unzipping tokenizers\\\\punkt.zip.\\n\"\n ],\n [\n \"word_tokens = word_tokenize(example_text)\\nprint(word_tokens)\",\n \"['I', 'learned', 'that', 'if', 'an', 'electric', 'slicer', 'is', 'used', 'the', 'blade', 'becomes', 'hot', 'enough', 'to', 'start', 'to', 'cook', 'the', 'prosciutto', '.']\\n\"\n ],\n [\n \"filtered_sentence = [word for word in word_tokens if not word in stop_words] \\nprint(filtered_sentence)\",\n \"['I', 'learned', 'electric', 'slicer', 'used', 'blade', 'becomes', 'hot', 'enough', 'start', 'cook', 'prosciutto', '.']\\n\"\n ]\n ],\n [\n [\n \"Stemming the sentence\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from nltk.stem import PorterStemmer \\nstemmer = PorterStemmer()\",\n \"_____no_output_____\"\n ],\n [\n \"stem_tokens=[stemmer.stem(word) for word in word_tokens]\\nprint(stem_tokens)\",\n \"['I', 'learn', 'that', 'if', 'an', 'electr', 'slicer', 'is', 'use', 'the', 'blade', 'becom', 'hot', 'enough', 'to', 'start', 'to', 'cook', 'the', 'prosciutto', '.']\\n\"\n ]\n ],\n [\n [\n \"Comparing the stemmed sentence using jaccard similarity\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from sklearn.metrics import jaccard_similarity_score\\n\\nscore = jaccard_similarity_score(word_tokens,stem_tokens)\\nprint(score)\",\n \"0.8095238095238095\\n\"\n ],\n [\n \"nltk.download('averaged_perceptron_tagger')\",\n \"[nltk_data] Downloading package averaged_perceptron_tagger to\\n[nltk_data] C:\\\\Users\\\\Aditya\\\\AppData\\\\Roaming\\\\nltk_data...\\n[nltk_data] Unzipping taggers\\\\averaged_perceptron_tagger.zip.\\n\"\n ],\n [\n \"#Write a function to get all the possible POS tags of NLTK?\\ntext = word_tokenize(\\\"And then therefore it was something completely different\\\")\\nnltk.pos_tag(text)\",\n \"_____no_output_____\"\n ],\n [\n \"nltk.download('tagsets')\",\n \"[nltk_data] Downloading package tagsets to\\n[nltk_data] C:\\\\Users\\\\Aditya\\\\AppData\\\\Roaming\\\\nltk_data...\\n[nltk_data] Unzipping help\\\\tagsets.zip.\\n\"\n ],\n [\n \"def all_pos_tags():\\n print(nltk.help.upenn_tagset())\\n\\nall_pos_tags()\",\n \"$: dollar\\n $ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$\\n'': closing quotation mark\\n ' ''\\n(: opening parenthesis\\n ( [ {\\n): closing parenthesis\\n ) ] }\\n,: comma\\n ,\\n--: dash\\n --\\n.: sentence terminator\\n . ! ?\\n:: colon or ellipsis\\n : ; ...\\nCC: conjunction, coordinating\\n & 'n and both but either et for less minus neither nor or plus so\\n therefore times v. versus vs. whether yet\\nCD: numeral, cardinal\\n mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-\\n seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025\\n fifteen 271,124 dozen quintillion DM2,000 ...\\nDT: determiner\\n all an another any both del each either every half la many much nary\\n neither no some such that the them these this those\\nEX: existential there\\n there\\nFW: foreign word\\n gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous\\n lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte\\n terram fiche oui corporis ...\\nIN: preposition or conjunction, subordinating\\n astride among uppon whether out inside pro despite on by throughout\\n below within for towards near behind atop around if like until below\\n next into if beside ...\\nJJ: adjective or numeral, ordinal\\n third ill-mannered pre-war regrettable oiled calamitous first separable\\n ectoplasmic battery-powered participatory fourth still-to-be-named\\n multilingual multi-disciplinary ...\\nJJR: adjective, comparative\\n bleaker braver breezier briefer brighter brisker broader bumper busier\\n calmer cheaper choosier cleaner clearer closer colder commoner costlier\\n cozier creamier crunchier cuter ...\\nJJS: adjective, superlative\\n calmest cheapest choicest classiest cleanest clearest closest commonest\\n corniest costliest crassest creepiest crudest cutest darkest deadliest\\n dearest deepest densest dinkiest ...\\nLS: list item marker\\n A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005\\n SP-44007 Second Third Three Two * a b c d first five four one six three\\n two\\nMD: modal auxiliary\\n can cannot could couldn't dare may might must need ought shall should\\n shouldn't will would\\nNN: noun, common, singular or mass\\n common-carrier cabbage knuckle-duster Casino afghan shed thermostat\\n investment slide humour falloff slick wind hyena override subhumanity\\n machinist ...\\nNNP: noun, proper, singular\\n Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos\\n Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA\\n Shannon A.K.C. Meltex Liverpool ...\\nNNPS: noun, proper, plural\\n Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists\\n Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques\\n Apache Apaches Apocrypha ...\\nNNS: noun, common, plural\\n undergraduates scotches bric-a-brac products bodyguards facets coasts\\n divestitures storehouses designs clubs fragrances averages\\n subjectivists apprehensions muses factory-jobs ...\\nPDT: pre-determiner\\n all both half many quite such sure this\\nPOS: genitive marker\\n ' 's\\nPRP: pronoun, personal\\n hers herself him himself hisself it itself me myself one oneself ours\\n ourselves ownself self she thee theirs them themselves they thou thy us\\nPRP$: pronoun, possessive\\n her his mine my our ours their thy your\\nRB: adverb\\n occasionally unabatingly maddeningly adventurously professedly\\n stirringly prominently technologically magisterially predominately\\n swiftly fiscally pitilessly ...\\nRBR: adverb, comparative\\n further gloomier grander graver greater grimmer harder harsher\\n healthier heavier higher however larger later leaner lengthier less-\\n perfectly lesser lonelier longer louder lower more ...\\nRBS: adverb, superlative\\n best biggest bluntest earliest farthest first furthest hardest\\n heartiest highest largest least less most nearest second tightest worst\\nRP: particle\\n aboard about across along apart around aside at away back before behind\\n by crop down ever fast for forth from go high i.e. in into just later\\n low more off on open out over per pie raising start teeth that through\\n under unto up up-pp upon whole with you\\nSYM: symbol\\n % & ' '' ''. ) ). * + ,. < = > @ A[fj] U.S U.S.S.R * ** ***\\nTO: \\\"to\\\" as preposition or infinitive marker\\n to\\nUH: interjection\\n Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen\\n huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly\\n man baby diddle hush sonuvabitch ...\\nVB: verb, base form\\n ask assemble assess assign assume atone attention avoid bake balkanize\\n bank begin behold believe bend benefit bevel beware bless boil bomb\\n boost brace break bring broil brush build ...\\nVBD: verb, past tense\\n dipped pleaded swiped regummed soaked tidied convened halted registered\\n cushioned exacted snubbed strode aimed adopted belied figgered\\n speculated wore appreciated contemplated ...\\nVBG: verb, present participle or gerund\\n telegraphing stirring focusing angering judging stalling lactating\\n hankerin' alleging veering capping approaching traveling besieging\\n encrypting interrupting erasing wincing ...\\nVBN: verb, past participle\\n multihulled dilapidated aerosolized chaired languished panelized used\\n experimented flourished imitated reunifed factored condensed sheared\\n unsettled primed dubbed desired ...\\nVBP: verb, present tense, not 3rd person singular\\n predominate wrap resort sue twist spill cure lengthen brush terminate\\n appear tend stray glisten obtain comprise detest tease attract\\n emphasize mold postpone sever return wag ...\\nVBZ: verb, present tense, 3rd person singular\\n bases reconstructs marks mixes displeases seals carps weaves snatches\\n slumps stretches authorizes smolders pictures emerges stockpiles\\n seduces fizzes uses bolsters slaps speaks pleads ...\\nWDT: WH-determiner\\n that what whatever which whichever\\nWP: WH-pronoun\\n that what whatever whatsoever which who whom whosoever\\nWP$: WH-pronoun, possessive\\n whose\\nWRB: Wh-adverb\\n how however whence whenever where whereby whereever wherein whereof why\\n``: opening quotation mark\\n ` ``\\nNone\\n\"\n ],\n [\n \"#Write a function to remove punctuation in NLTK\\n\\ndef remove_punctuation(s):\\n words = nltk.word_tokenize(s)\\n words=[word.lower() for word in words if word.isalpha()]\\n print(words)\\nstr1 = restuarant[\\\"Review\\\"][12]\\nremove_punctuation(str1)\",\n \"['now', 'i', 'am', 'getting', 'angry', 'and', 'i', 'want', 'my', 'damn', 'pho']\\n\"\n ],\n [\n \"#Write a function to remove stop words in NLTK\\n\\ndef remove_stop_words(s):\\n word_tokens = word_tokenize(s)\\n print(word_tokens)\\n filtered_sentence = [word for word in word_tokens if not word in stop_words] \\n print(filtered_sentence)\\nstr1 = restuarant[\\\"Review\\\"][20]\\nremove_stop_words(str1)\",\n \"['That', 'was', \\\"n't\\\", 'even', 'all', 'that', 'great', 'to', 'begin', 'with', '?']\\n['That', \\\"n't\\\", 'even', 'great', 'begin', '?']\\n\"\n ],\n [\n \"#Write a function to tokenise a sentence in NLTK\\n\\ndef tokenize_sentence(s):\\n word_tokens = word_tokenize(s)\\n print(word_tokens)\\nstr1 = restuarant[\\\"Review\\\"][20]\\ntokenize_sentence(str1)\",\n \"['That', 'was', \\\"n't\\\", 'even', 'all', 'that', 'great', 'to', 'begin', 'with', '?']\\n\"\n ],\n [\n \"Write a function to check whether the word is a German word or not? https://stackoverflow.com/questions/3788870/how-to-check-if-a-word-is-an-english-word-with-python\\n\\nWrite a function to get the human names from the text below: President Abraham Lincoln suspended the writ of habeas corpus in the Civil War. President Franklin D. Roosevelt claimed emergency powers to fight the Great Depression and World War II. President George W. Bush adopted an expansive concept of White House power after 9/11. President Barack Obama used executive action to shield some undocumented immigrants from deportation.\\n\\nWrite a function to create a word cloud using Python (with or without NLTK)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"#jupyter kernelspec install-self --user\\n1. Remove stop words from the content by using NLTK English stop words\\n\\n2. Get the stem by using Stemming\\n\\n3. Get the Similarity between two strings by using Jaccard Similarity\\n\\n4. Write a function to get all the possible POS tags of NLTK?\\n\\n5. Write a function to check whether the word is a German word or not?\\nhttps://stackoverflow.com/questions/3788870/how-to-check-if-a-word-is-an-english-word-with-python\\n\\n6. Write a function to remove punctuation in NLTK\\n\\n7. Write a function to remove stop words using NLTK\\n\\n8. Write a function to tokenize a single sentence using NLTK\\n\\n9. Write a function to get the human names from the text below:\\nPresident Abraham Lincoln suspended the writ of habeas corpus in the Civil War. President Franklin D. Roosevelt claimed emergency powers to fight the Great Depression and World War II. President George W. Bush adopted an expansive concept of White House power after 9/11. President Barack Obama used executive action to shield some undocumented immigrants from deportation.\\n\\n10. Write a function to create a word cloud using Python (with or without NLTK)\\n\\n11. How to get alpha numeric characters as tokens in NLTK\\n\\n12. How to remove all punctuation marks and non-alpha numeric characters?\\n\\n13. How to create a new corpus with NLTK?\\n\\n14. How to change the NLTK Download directory in Code?\\n\\n15. Show a small sample with pyStatParser\\n\\n16. How to get phrasses form text entries?\\n\\n17. How to use Chunk Extraction in NLTK?\\n\\n18. UnicodeDecodeError: ‘ascii’ codec can’t decode byte 0xcb in position 0: ordinal not in range(128)\\nFix this issue\\n\\n19. Generate tag from text content\\n\\n20. Show a simple NLTK sample using Stanford NER Algorithm\\n\\n21. How to tokenize a string in NLTK?\\n\\n22. How to replace 1,2,3.. with “1st, 2nd, 3rd”\\nHow to generate strings like “1st, 2nd, 3rd ..”\\n\\n23. How to extract number from text:\\nHow to get the price from Kijiji or Craiglist content\\n\\n24. How to classify documents into categories\\n\\n25. Identify the language from the text\\n\\n26. Check grammar in the sentence by using Python\\n\\n27. Write a simple example to show dependency parsing in NLTK?\\n\\n28. Identify place in this sentence\\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\\nHe has a brother, Sadiki, and a sister, Sherine.\\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\\n\\n29. Find all cities in this page:\\n\\n30. Convert “spamming” to “spam” by using Lemmatizer\\n\\n31. Write a simple code to show Perceptron tagger\\n\\n32. Write an example code to show PunktSentenceTokenizer\\n\\n33. Convert past tense “gave” to present tense “give” by using NLTK\\n\\n34. Write a code to show WordNetLemmatizer\\n\\n35. Write a code show MultiNomial Naive Bayes and NLTK\\n\\n36. Write an example to show NLTK Collocation\\n\\n37. Identify Gender from the given sentence by using NLTK\\nKelly and John went to meet Ryan and Jenni. But Jenni was not there when they reached the place.\\n\\n38. Write a code showcase FreqDist in python\\n\\n39. Do a sentiment analysis on thie sentence\\nI love this sandwich\\n\\n40. Collect nouns from this sentence\\nI am John from Toronto\\n\\n41. Compute N Grams in this sentence\\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\\nHe has a brother, Sadiki, and a sister, Sherine.\\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\\n\\n42. Write a code to use WordNik\\n\\n43. Find meaning of “Dunk” by using Cambring API\\nhttps://dictionary-api.cambridge.org/\\n\\n44. How to count the frequency of bigram?\\nUse the sentence below\\nI love Canada . I am so in love with Canada . Canada is great . samsung is great . I really really love Canadian cities. America can never beat Canada . Canada is better than America\\n\\n45. Count the verbs in this sentence:\\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\\nHe has a brother, Sadiki, and a sister, Sherine.\\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\\n\\n46. Show a simple example using ANTLR\\nhttps://www.antlr.org/\\n\\n47. Simple example to show Latent Semantic Modeling in NLTK\\n\\n48. Show an exmple for Singular Value Decomposition\\n\\n49. Use LDA in NLTK\\n\\n50. Show a simple example using NLTK GAE\\nhttps://github.com/rutherford/nltk-gae\\n\\n51. Show a simple exaple using Penn Treebank\\n\\n52. Do a simple Sentiment Analysis with NLTK and Naive Bayes\\n\\n53. Write a code to get top terms with TF-IDF score\\n\\n54. Compare two sentences\\n\\n55. Find the probability of a word in a given sentence\\n\\n56. Compare “car” and “äutomobile” by using Wordnet\\n\\n57. Check the frequency of similar words\\n\\n58. Show an example using MaltParser\\n\\n59. Identify the subject of the sentence\\n\\n60. How to extract word from Synset? Show an example\\n\\n61. Generate sentences by using\\n\\n62. Print random sentences like in Lorem Ipsum\\n\\n63. Count ngrams in the sentence below:\\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\\nHe has a brother, Sadiki, and a sister, Sherine.\\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\\n\\n64. Use MaltParser in Python\\n\\n65. Generate tags on any celebrity’s tweets?\\n\\n66. Write a code to show TF-IDF\\n\\n67. Classify moview reviews as “good” or “bad” by using NLTK\\n\\n68. Show a sample for Alignment model in NLTK\\n\\n69. How to save an alignment model in NLTK?\\n\\n70. Find verbs in the given sentence\\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\\nHe has a brother, Sadiki, and a sister, Sherine.\\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\\n\\n71. Get Named Entities from the sentence\\nBolt was born on 21 August 1986 to parents Wellesley and Jennifer Bolt in Sherwood Content, a small town in Jamaica.\\nHe has a brother, Sadiki, and a sister, Sherine.\\nHis parents ran the local grocery store in the rural area, and Bolt spent his time playing cricket and football in the street with his brother, later saying, “When I was young, I didn’t really think about anything other than sports.”\\nAs the reigning 200 m champion at both the World Youth and World Junior championships, Bolt hoped to take a clean sweep of the world 200 m championships in the Senior World Championships in Paris.\\n\\n72. Write a code to use Vader Sentiment Analyzer\\n\\n73. Generat Random text in Python\\n\\n74. Find the cosine similarity of two sentences\\nJulie loves me more than Linda loves me\\nJane likes me more than Julie loves me\\n\\n75. Show an example using ViterbiParser\\n\\n76. Show a simple sentiment analysis by using Pointwise Mutual Information\\n\\n77. Use NER to find persons\\n\\n78. How to parse multiple sentences using MaltParser?\\n\\n79. Find the tense of the sentence\\n\\n80. How to calculate BLEU score for a sentence\\n\\n81. How to find given word is singular or not?\\n\\n82. How to identify contractions in a given sentence\\n\\n83. Find the similarity between “cheap” and “low price” by using NLTK\\n\\n84. In the given sentence “John” spelled wrong as “Jhon”. How to find biblical names and fix these typos?\\n\\n85. Show an exmple to use Metaphones in NLTK or Python\\n\\n86. Show an example to use FuzzyWuzzy\\n\\n87. How to use Stanford Relation Extractor?\\n\\n88. Show an example to find the named entities\\n\\n89. Find the Sentiment by using SentiWordNet\\n\\n90. Create a custom Corpus by using NLTK?\\n\\n91. Show an example to extract relationships using NLTK?\\n\\n92. Fix this code issue\\n\\n93. Identify short form in the sentence by using NLTK\\n“ty. U did an awesome job. However, It would b gr8 If you type w/o short forms”\\n\\n94. How to auto label the given texts\\n\\n95. Identify food names in the given sentence\\nI had some Chips and Turkey for the lunch. Later I had some ice cream and rice in the evening.\\n\\n96. Find the specific phrase by using NLTK Regex\\nThe pizza was awesome and brilliant\\n\\n97. Convert these lines to two sentences by using Sentence tokenizer?\\nFig. 2 shows a U.S.A. map. However, this is not exactly right\\n\\n98. How to convert this positive sentence to negative\\nAnna is a great girl and she learn things quickly.\\n\\n99. Find the general synonyms\\n\\n100. Show an example to extract useful sentences by using NLTK\\n\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"],["markdown"],["code","code","code","code","code"],["markdown"],["code","code","code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code","code","code","code","code"],["markdown"]],"string":"[\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458819,"cells":{"hexsha":{"kind":"string","value":"e7e38181ab1c5567661e90d62ed6909d9290814c"},"size":{"kind":"number","value":516797,"string":"516,797"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"ids_R.ipynb"},"max_stars_repo_name":{"kind":"string","value":"MadhavJivrajani/IDS2019_Project"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8eae458b0eebc39e1eecda0b8bbbf33f714574c8"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"ids_R.ipynb"},"max_issues_repo_name":{"kind":"string","value":"MadhavJivrajani/IDS2019_Project"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8eae458b0eebc39e1eecda0b8bbbf33f714574c8"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ids_R.ipynb"},"max_forks_repo_name":{"kind":"string","value":"MadhavJivrajani/IDS2019_Project"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8eae458b0eebc39e1eecda0b8bbbf33f714574c8"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":418.1205501618,"string":"418.12055"},"max_line_length":{"kind":"number","value":214182,"string":"214,182"},"alphanum_fraction":{"kind":"number","value":0.900241294,"string":"0.900241"},"cells":{"kind":"list like","value":[[["library(tidyverse) ","── \u001b[1mAttaching packages\u001b[22m ─────────────────────────────────────── tidyverse 1.2.1 ──\n\u001b[32m✔\u001b[39m \u001b[34mggplot2\u001b[39m 3.2.1 \u001b[32m✔\u001b[39m \u001b[34mpurrr \u001b[39m 0.3.3\n\u001b[32m✔\u001b[39m \u001b[34mtibble \u001b[39m 2.1.3 \u001b[32m✔\u001b[39m \u001b[34mdplyr \u001b[39m 0.8.3\n\u001b[32m✔\u001b[39m \u001b[34mtidyr \u001b[39m 1.0.0 \u001b[32m✔\u001b[39m \u001b[34mstringr\u001b[39m 1.4.0\n\u001b[32m✔\u001b[39m \u001b[34mreadr \u001b[39m 1.3.1 \u001b[32m✔\u001b[39m \u001b[34mforcats\u001b[39m 0.4.0\n── \u001b[1mConflicts\u001b[22m ────────────────────────────────────────── tidyverse_conflicts() ──\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mfilter()\u001b[39m masks \u001b[34mstats\u001b[39m::filter()\n\u001b[31m✖\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mlag()\u001b[39m masks \u001b[34mstats\u001b[39m::lag()\n"],["library(tidyverse) # general\nlibrary(countrycode) # continent\nlibrary(rworldmap) # quick country-level heat maps\nlibrary(gridExtra) # plots\nlibrary(broom)","Loading required package: sp\n### Welcome to rworldmap ###\nFor a short introduction type : \t vignette('rworldmap')\n\nAttaching package: ‘gridExtra’\n\nThe following object is masked from ‘package:dplyr’:\n\n combine\n\n"],["ensureCranPkg <- function(pkg) {\n if(!suppressWarnings(requireNamespace(pkg, quietly = TRUE))) {\n install.packages(pkg)\n }\n}","_____no_output_____"],["ensureCranPkg(\"ggalt\")\nensureCranPkg(\"tidyverse\")\nensureCranPkg(\"countrycode\")\nensureCranPkg(\"rworldmap\")\nensureCranPkg(\"gridExtra\")","Installing package into ‘/home/anand/R/x86_64-pc-linux-gnu-library/3.4’\n(as ‘lib’ is unspecified)\nalso installing the dependency ‘proj4’\n\nWarning message in install.packages(pkg):\n“installation of package ‘proj4’ had non-zero exit status”Warning message in install.packages(pkg):\n“installation of package ‘ggalt’ had non-zero exit status”"],["data <- read_csv(\"datasets/this_is_only_for_reference.csv\")","Parsed with column specification:\ncols(\n country = \u001b[31mcol_character()\u001b[39m,\n year = \u001b[32mcol_double()\u001b[39m,\n sex = \u001b[31mcol_character()\u001b[39m,\n age = \u001b[31mcol_character()\u001b[39m,\n suicides_no = \u001b[32mcol_double()\u001b[39m,\n population = \u001b[32mcol_double()\u001b[39m,\n `suicides/100k pop` = \u001b[32mcol_double()\u001b[39m,\n `country-year` = \u001b[31mcol_character()\u001b[39m,\n `HDI for year` = \u001b[32mcol_double()\u001b[39m,\n `gdp_for_year ($)` = \u001b[32mcol_number()\u001b[39m,\n `gdp_per_capita ($)` = \u001b[32mcol_double()\u001b[39m,\n generation = \u001b[31mcol_character()\u001b[39m\n)\n"],["data <- data %>% \n select(-c(`HDI for year`, `suicides/100k pop`)) %>%\n rename(gdp_for_year = `gdp_for_year ($)`, \n gdp_per_capita = `gdp_per_capita ($)`, \n country_year = `country-year`) %>%\n as.data.frame()","_____no_output_____"],["data <- data %>%\n filter(year != 2016) %>% # I therefore exclude 2016 data\n select(-country_year)","_____no_output_____"],["minimum_years <- data %>%\n group_by(country) %>%\n summarize(rows = n(), \n years = rows / 12) %>%\n arrange(years)\n\ndata <- data %>%\n filter(!(country %in% head(minimum_years$country, 7)))","_____no_output_____"],["library(countrycode)","_____no_output_____"],["data$age <- gsub(\" years\", \"\", data$age)\ndata$sex <- ifelse(data$sex == \"male\", \"Male\", \"Female\")\n\n\n# getting continent data:\ndata$continent <- countrycode(sourcevar = data[, \"country\"],\n origin = \"country.name\",\n destination = \"continent\")\n\n# Nominal factors\ndata_nominal <- c('country', 'sex', 'continent')\ndata[data_nominal] <- lapply(data[data_nominal], function(x){factor(x)})\n\n\n# Making age ordinal\ndata$age <- factor(data$age, \n ordered = T, \n levels = c(\"5-14\",\n \"15-24\", \n \"25-34\", \n \"35-54\", \n \"55-74\", \n \"75+\"))\n\n# Making generation ordinal\ndata$generation <- factor(data$generation, \n ordered = T, \n levels = c(\"G.I. Generation\", \n \"Silent\",\n \"Boomers\", \n \"Generation X\", \n \"Millenials\", \n \"Generation Z\"))\n\ndata <- as_tibble(data)\n\n\n# the global rate over the time period will be useful:\n\nglobal_average <- (sum(as.numeric(data$suicides_no)) / sum(as.numeric(data$population))) * 100000","_____no_output_____"],["continent <- data %>%\n group_by(continent) %>%\n summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000) %>%\n arrange(suicide_per_100k)\n\ncontinent$continent <- factor(continent$continent, ordered = T, levels = continent$continent)\n\ncontinent_plot <- ggplot(continent, aes(x = continent, y = suicide_per_100k, fill = continent)) + \n geom_bar(stat = \"identity\") + \n labs(title = \"Global Suicides (per 100k), by Continent\",\n x = \"Continent\", \n y = \"Suicides per 100k\", \n fill = \"Continent\") +\n theme(legend.position = \"none\", title = element_text(size = 10)) + \n scale_y_continuous(breaks = seq(0, 20, 1), minor_breaks = F)","_____no_output_____"],["continent_plot","_____no_output_____"],["country <- data %>%\n group_by(country, continent) %>%\n summarize(n = n(), \n suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000) %>%\n arrange(desc(suicide_per_100k))\n\ncountry$country <- factor(country$country, \n ordered = T, \n levels = rev(country$country))\n\nggplot(country, aes(x = country, y = suicide_per_100k, fill = continent)) + \n geom_bar(stat = \"identity\") + \n geom_hline(yintercept = global_average, linetype = 2, color = \"grey35\", size = 1) +\n labs(title = \"Global suicides per 100k, by Country\",\n x = \"Country\", \n y = \"Suicides per 100k\", \n fill = \"Continent\") +\n coord_flip() +\n scale_y_continuous(breaks = seq(0, 45, 2)) + \n theme(legend.position = \"bottom\")","_____no_output_____"],["age_plot <- data %>%\n group_by(age) %>%\n summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000) %>%\n ggplot(aes(x = age, y = suicide_per_100k, fill = age)) + \n geom_bar(stat = \"identity\") + \n labs(title = \"Global suicides per 100k, by Age\",\n x = \"Age\", \n y = \"Suicides per 100k\") +\n theme(legend.position = \"none\") + \n scale_y_continuous(breaks = seq(0, 30, 1), minor_breaks = F)","_____no_output_____"],["age_plot","_____no_output_____"],["glimpse(data)","Observations: 27,492\nVariables: 10\n$ country \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m Albania, Albania, Albania, Albania, Albania, Albania, …\n$ year \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m 1987, 1987, 1987, 1987, 1987, 1987, 1987, 1987, 1987, …\n$ sex \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m Male, Male, Female, Male, Male, Female, Female, Female…\n$ age \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m 15-24, 35-54, 15-24, 75+, 25-34, 75+, 35-54, 25-34, 55…\n$ suicides_no \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m 21, 16, 14, 1, 9, 1, 6, 4, 1, 0, 0, 0, 2, 17, 1, 14, 4…\n$ population \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m 312900, 308000, 289700, 21800, 274300, 35600, 278800, …\n$ gdp_for_year \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m 2156624900, 2156624900, 2156624900, 2156624900, 215662…\n$ gdp_per_capita \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796,…\n$ generation \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m Generation X, Silent, Generation X, G.I. Generation, B…\n$ continent \u001b[3m\u001b[38;5;246m\u001b[39m\u001b[23m Europe, Europe, Europe, Europe, Europe, Europe, Europe…\n"],["corrplot.mixed(corr = cor(data[,c(\"year\",\"suicides_no\",\"population\",\"gdp_for_year\",\"gdp_per_capita\")]))","_____no_output_____"],["library(corrplot)","corrplot 0.84 loaded\n"],["country_mean_gdp <- data %>%\n group_by(country, continent) %>%\n summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000, \n gdp_per_capita = mean(gdp_per_capita))\n\nggplot(country_mean_gdp, aes(x = gdp_per_capita, y = suicide_per_100k, col = continent)) + \n geom_point() + \n scale_x_continuous(labels=scales::dollar_format(prefix=\"$\"), breaks = seq(0, 70000, 10000)) + \n labs(title = \"Correlation between GDP (per capita) and Suicides per 100k\", \n subtitle = \"Plot containing every country\",\n x = \"GDP (per capita)\", \n y = \"Suicides per 100k\", \n col = \"Continent\") ","_____no_output_____"],["data","_____no_output_____"],["data['gdp_per_capita ($)']","_____no_output_____"],["x=data.frame(data['gdp_per_capita ($)'])","_____no_output_____"],["y=data.frame(data['suicides/100k pop'])","_____no_output_____"],["plot(x,y,pch=19)","_____no_output_____"],["p <- plot(data['gdp_per_capita ($)'],data['suicides/100k pop'],pch=19)\nfit <- lm(y~poly(x,2,raw=TRUE)) \nprint(p)","_____no_output_____"],["data %>%\n group_by(continent, age) %>%\n summarize(n = n(), \n suicides = sum(as.numeric(suicides_no)), \n population = sum(as.numeric(population)), \n suicide_per_100k = (suicides / population) * 100000) %>%\n ggplot(aes(x = continent, y = suicide_per_100k, fill = age)) + \n geom_bar(stat = \"identity\", position = \"dodge\") + \n geom_hline(yintercept = global_average, linetype = 2, color = \"grey35\", size = 1) +\n labs(title = \"Age Disparity, by Continent\",\n x = \"Continent\", \n y = \"Suicides per 100k\", \n fill = \"Age\")","_____no_output_____"],["library(tidyr)\nlibrary(purrr)","_____no_output_____"],["ggplot(gdp_suicide_no_outliers, aes(x = gdp_per_capita, y = suicide_per_100k, col = continent)) + \n geom_point() + \n geom_smooth(method = \"lm\", aes(group = 1)) + \n scale_x_continuous(labels=scales::dollar_format(prefix=\"$\"), breaks = seq(0, 70000, 10000)) + \n labs(title = \"Correlation between GDP (per capita) and Suicides per 100k\", \n subtitle = \"Plot with high CooksD countries removed (5/93 total)\",\n x = \"GDP (per capita)\", \n y = \"Suicides per 100k\", \n col = \"Continent\") + \n theme(legend.position = \"none\")","_____no_output_____"],["gdp_suicide_no_outliers <- model1 %>%\n augment() %>%\n arrange(desc(.cooksd)) %>%\n filter(.cooksd < 4/nrow(.)) %>% # removes 5/93 countries\n inner_join(country_mean_gdp, by = c(\"suicide_per_100k\", \"gdp_per_capita\")) %>%\n select(country, continent, gdp_per_capita, suicide_per_100k)\n\n","_____no_output_____"],["data_second <- read_csv(\"datasets/this_is_only_for_reference.csv\")","Parsed with column specification:\ncols(\n country = \u001b[31mcol_character()\u001b[39m,\n year = \u001b[32mcol_double()\u001b[39m,\n sex = \u001b[31mcol_character()\u001b[39m,\n age = \u001b[31mcol_character()\u001b[39m,\n suicides_no = \u001b[32mcol_double()\u001b[39m,\n population = \u001b[32mcol_double()\u001b[39m,\n `suicides/100k pop` = \u001b[32mcol_double()\u001b[39m,\n `country-year` = \u001b[31mcol_character()\u001b[39m,\n `HDI for year` = \u001b[32mcol_double()\u001b[39m,\n `gdp_for_year ($)` = \u001b[32mcol_number()\u001b[39m,\n `gdp_per_capita ($)` = \u001b[32mcol_double()\u001b[39m,\n generation = \u001b[31mcol_character()\u001b[39m\n)\n"],["sapply(data_second, function(x) mean(is.na(df)))","Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\n“is.na() applied to non-(list or vector) of type 'closure'”"],["model1 <- lm(suicide_per_100k ~ gdp_per_capita, data = country_mean_gdp)","_____no_output_____"],["gdp_suicide_no_outliers <- model1 %>%\n augment() %>%\n arrange(desc(.cooksd)) %>%\n filter(.cooksd < 4/nrow(.)) %>% # removes 5/93 countries\n inner_join(country_mean_gdp, by = c(\"suicide_per_100k\", \"gdp_per_capita\")) %>%\n select(country, continent, gdp_per_capita, suicide_per_100k)","_____no_output_____"],["model2 <- lm(suicide_per_100k ~ gdp_per_capita, data = gdp_suicide_no_outliers)","_____no_output_____"],["summary(model2)","_____no_output_____"]]],"string":"[\n [\n [\n \"library(tidyverse) \",\n \"── \\u001b[1mAttaching packages\\u001b[22m ─────────────────────────────────────── tidyverse 1.2.1 ──\\n\\u001b[32m✔\\u001b[39m \\u001b[34mggplot2\\u001b[39m 3.2.1 \\u001b[32m✔\\u001b[39m \\u001b[34mpurrr \\u001b[39m 0.3.3\\n\\u001b[32m✔\\u001b[39m \\u001b[34mtibble \\u001b[39m 2.1.3 \\u001b[32m✔\\u001b[39m \\u001b[34mdplyr \\u001b[39m 0.8.3\\n\\u001b[32m✔\\u001b[39m \\u001b[34mtidyr \\u001b[39m 1.0.0 \\u001b[32m✔\\u001b[39m \\u001b[34mstringr\\u001b[39m 1.4.0\\n\\u001b[32m✔\\u001b[39m \\u001b[34mreadr \\u001b[39m 1.3.1 \\u001b[32m✔\\u001b[39m \\u001b[34mforcats\\u001b[39m 0.4.0\\n── \\u001b[1mConflicts\\u001b[22m ────────────────────────────────────────── tidyverse_conflicts() ──\\n\\u001b[31m✖\\u001b[39m \\u001b[34mdplyr\\u001b[39m::\\u001b[32mfilter()\\u001b[39m masks \\u001b[34mstats\\u001b[39m::filter()\\n\\u001b[31m✖\\u001b[39m \\u001b[34mdplyr\\u001b[39m::\\u001b[32mlag()\\u001b[39m masks \\u001b[34mstats\\u001b[39m::lag()\\n\"\n ],\n [\n \"library(tidyverse) # general\\nlibrary(countrycode) # continent\\nlibrary(rworldmap) # quick country-level heat maps\\nlibrary(gridExtra) # plots\\nlibrary(broom)\",\n \"Loading required package: sp\\n### Welcome to rworldmap ###\\nFor a short introduction type : \\t vignette('rworldmap')\\n\\nAttaching package: ‘gridExtra’\\n\\nThe following object is masked from ‘package:dplyr’:\\n\\n combine\\n\\n\"\n ],\n [\n \"ensureCranPkg <- function(pkg) {\\n if(!suppressWarnings(requireNamespace(pkg, quietly = TRUE))) {\\n install.packages(pkg)\\n }\\n}\",\n \"_____no_output_____\"\n ],\n [\n \"ensureCranPkg(\\\"ggalt\\\")\\nensureCranPkg(\\\"tidyverse\\\")\\nensureCranPkg(\\\"countrycode\\\")\\nensureCranPkg(\\\"rworldmap\\\")\\nensureCranPkg(\\\"gridExtra\\\")\",\n \"Installing package into ‘/home/anand/R/x86_64-pc-linux-gnu-library/3.4’\\n(as ‘lib’ is unspecified)\\nalso installing the dependency ‘proj4’\\n\\nWarning message in install.packages(pkg):\\n“installation of package ‘proj4’ had non-zero exit status”Warning message in install.packages(pkg):\\n“installation of package ‘ggalt’ had non-zero exit status”\"\n ],\n [\n \"data <- read_csv(\\\"datasets/this_is_only_for_reference.csv\\\")\",\n \"Parsed with column specification:\\ncols(\\n country = \\u001b[31mcol_character()\\u001b[39m,\\n year = \\u001b[32mcol_double()\\u001b[39m,\\n sex = \\u001b[31mcol_character()\\u001b[39m,\\n age = \\u001b[31mcol_character()\\u001b[39m,\\n suicides_no = \\u001b[32mcol_double()\\u001b[39m,\\n population = \\u001b[32mcol_double()\\u001b[39m,\\n `suicides/100k pop` = \\u001b[32mcol_double()\\u001b[39m,\\n `country-year` = \\u001b[31mcol_character()\\u001b[39m,\\n `HDI for year` = \\u001b[32mcol_double()\\u001b[39m,\\n `gdp_for_year ($)` = \\u001b[32mcol_number()\\u001b[39m,\\n `gdp_per_capita ($)` = \\u001b[32mcol_double()\\u001b[39m,\\n generation = \\u001b[31mcol_character()\\u001b[39m\\n)\\n\"\n ],\n [\n \"data <- data %>% \\n select(-c(`HDI for year`, `suicides/100k pop`)) %>%\\n rename(gdp_for_year = `gdp_for_year ($)`, \\n gdp_per_capita = `gdp_per_capita ($)`, \\n country_year = `country-year`) %>%\\n as.data.frame()\",\n \"_____no_output_____\"\n ],\n [\n \"data <- data %>%\\n filter(year != 2016) %>% # I therefore exclude 2016 data\\n select(-country_year)\",\n \"_____no_output_____\"\n ],\n [\n \"minimum_years <- data %>%\\n group_by(country) %>%\\n summarize(rows = n(), \\n years = rows / 12) %>%\\n arrange(years)\\n\\ndata <- data %>%\\n filter(!(country %in% head(minimum_years$country, 7)))\",\n \"_____no_output_____\"\n ],\n [\n \"library(countrycode)\",\n \"_____no_output_____\"\n ],\n [\n \"data$age <- gsub(\\\" years\\\", \\\"\\\", data$age)\\ndata$sex <- ifelse(data$sex == \\\"male\\\", \\\"Male\\\", \\\"Female\\\")\\n\\n\\n# getting continent data:\\ndata$continent <- countrycode(sourcevar = data[, \\\"country\\\"],\\n origin = \\\"country.name\\\",\\n destination = \\\"continent\\\")\\n\\n# Nominal factors\\ndata_nominal <- c('country', 'sex', 'continent')\\ndata[data_nominal] <- lapply(data[data_nominal], function(x){factor(x)})\\n\\n\\n# Making age ordinal\\ndata$age <- factor(data$age, \\n ordered = T, \\n levels = c(\\\"5-14\\\",\\n \\\"15-24\\\", \\n \\\"25-34\\\", \\n \\\"35-54\\\", \\n \\\"55-74\\\", \\n \\\"75+\\\"))\\n\\n# Making generation ordinal\\ndata$generation <- factor(data$generation, \\n ordered = T, \\n levels = c(\\\"G.I. Generation\\\", \\n \\\"Silent\\\",\\n \\\"Boomers\\\", \\n \\\"Generation X\\\", \\n \\\"Millenials\\\", \\n \\\"Generation Z\\\"))\\n\\ndata <- as_tibble(data)\\n\\n\\n# the global rate over the time period will be useful:\\n\\nglobal_average <- (sum(as.numeric(data$suicides_no)) / sum(as.numeric(data$population))) * 100000\",\n \"_____no_output_____\"\n ],\n [\n \"continent <- data %>%\\n group_by(continent) %>%\\n summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000) %>%\\n arrange(suicide_per_100k)\\n\\ncontinent$continent <- factor(continent$continent, ordered = T, levels = continent$continent)\\n\\ncontinent_plot <- ggplot(continent, aes(x = continent, y = suicide_per_100k, fill = continent)) + \\n geom_bar(stat = \\\"identity\\\") + \\n labs(title = \\\"Global Suicides (per 100k), by Continent\\\",\\n x = \\\"Continent\\\", \\n y = \\\"Suicides per 100k\\\", \\n fill = \\\"Continent\\\") +\\n theme(legend.position = \\\"none\\\", title = element_text(size = 10)) + \\n scale_y_continuous(breaks = seq(0, 20, 1), minor_breaks = F)\",\n \"_____no_output_____\"\n ],\n [\n \"continent_plot\",\n \"_____no_output_____\"\n ],\n [\n \"country <- data %>%\\n group_by(country, continent) %>%\\n summarize(n = n(), \\n suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000) %>%\\n arrange(desc(suicide_per_100k))\\n\\ncountry$country <- factor(country$country, \\n ordered = T, \\n levels = rev(country$country))\\n\\nggplot(country, aes(x = country, y = suicide_per_100k, fill = continent)) + \\n geom_bar(stat = \\\"identity\\\") + \\n geom_hline(yintercept = global_average, linetype = 2, color = \\\"grey35\\\", size = 1) +\\n labs(title = \\\"Global suicides per 100k, by Country\\\",\\n x = \\\"Country\\\", \\n y = \\\"Suicides per 100k\\\", \\n fill = \\\"Continent\\\") +\\n coord_flip() +\\n scale_y_continuous(breaks = seq(0, 45, 2)) + \\n theme(legend.position = \\\"bottom\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"age_plot <- data %>%\\n group_by(age) %>%\\n summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000) %>%\\n ggplot(aes(x = age, y = suicide_per_100k, fill = age)) + \\n geom_bar(stat = \\\"identity\\\") + \\n labs(title = \\\"Global suicides per 100k, by Age\\\",\\n x = \\\"Age\\\", \\n y = \\\"Suicides per 100k\\\") +\\n theme(legend.position = \\\"none\\\") + \\n scale_y_continuous(breaks = seq(0, 30, 1), minor_breaks = F)\",\n \"_____no_output_____\"\n ],\n [\n \"age_plot\",\n \"_____no_output_____\"\n ],\n [\n \"glimpse(data)\",\n \"Observations: 27,492\\nVariables: 10\\n$ country \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m Albania, Albania, Albania, Albania, Albania, Albania, …\\n$ year \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m 1987, 1987, 1987, 1987, 1987, 1987, 1987, 1987, 1987, …\\n$ sex \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m Male, Male, Female, Male, Male, Female, Female, Female…\\n$ age \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m 15-24, 35-54, 15-24, 75+, 25-34, 75+, 35-54, 25-34, 55…\\n$ suicides_no \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m 21, 16, 14, 1, 9, 1, 6, 4, 1, 0, 0, 0, 2, 17, 1, 14, 4…\\n$ population \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m 312900, 308000, 289700, 21800, 274300, 35600, 278800, …\\n$ gdp_for_year \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m 2156624900, 2156624900, 2156624900, 2156624900, 215662…\\n$ gdp_per_capita \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m 796, 796, 796, 796, 796, 796, 796, 796, 796, 796, 796,…\\n$ generation \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m Generation X, Silent, Generation X, G.I. Generation, B…\\n$ continent \\u001b[3m\\u001b[38;5;246m\\u001b[39m\\u001b[23m Europe, Europe, Europe, Europe, Europe, Europe, Europe…\\n\"\n ],\n [\n \"corrplot.mixed(corr = cor(data[,c(\\\"year\\\",\\\"suicides_no\\\",\\\"population\\\",\\\"gdp_for_year\\\",\\\"gdp_per_capita\\\")]))\",\n \"_____no_output_____\"\n ],\n [\n \"library(corrplot)\",\n \"corrplot 0.84 loaded\\n\"\n ],\n [\n \"country_mean_gdp <- data %>%\\n group_by(country, continent) %>%\\n summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000, \\n gdp_per_capita = mean(gdp_per_capita))\\n\\nggplot(country_mean_gdp, aes(x = gdp_per_capita, y = suicide_per_100k, col = continent)) + \\n geom_point() + \\n scale_x_continuous(labels=scales::dollar_format(prefix=\\\"$\\\"), breaks = seq(0, 70000, 10000)) + \\n labs(title = \\\"Correlation between GDP (per capita) and Suicides per 100k\\\", \\n subtitle = \\\"Plot containing every country\\\",\\n x = \\\"GDP (per capita)\\\", \\n y = \\\"Suicides per 100k\\\", \\n col = \\\"Continent\\\") \",\n \"_____no_output_____\"\n ],\n [\n \"data\",\n \"_____no_output_____\"\n ],\n [\n \"data['gdp_per_capita ($)']\",\n \"_____no_output_____\"\n ],\n [\n \"x=data.frame(data['gdp_per_capita ($)'])\",\n \"_____no_output_____\"\n ],\n [\n \"y=data.frame(data['suicides/100k pop'])\",\n \"_____no_output_____\"\n ],\n [\n \"plot(x,y,pch=19)\",\n \"_____no_output_____\"\n ],\n [\n \"p <- plot(data['gdp_per_capita ($)'],data['suicides/100k pop'],pch=19)\\nfit <- lm(y~poly(x,2,raw=TRUE)) \\nprint(p)\",\n \"_____no_output_____\"\n ],\n [\n \"data %>%\\n group_by(continent, age) %>%\\n summarize(n = n(), \\n suicides = sum(as.numeric(suicides_no)), \\n population = sum(as.numeric(population)), \\n suicide_per_100k = (suicides / population) * 100000) %>%\\n ggplot(aes(x = continent, y = suicide_per_100k, fill = age)) + \\n geom_bar(stat = \\\"identity\\\", position = \\\"dodge\\\") + \\n geom_hline(yintercept = global_average, linetype = 2, color = \\\"grey35\\\", size = 1) +\\n labs(title = \\\"Age Disparity, by Continent\\\",\\n x = \\\"Continent\\\", \\n y = \\\"Suicides per 100k\\\", \\n fill = \\\"Age\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"library(tidyr)\\nlibrary(purrr)\",\n \"_____no_output_____\"\n ],\n [\n \"ggplot(gdp_suicide_no_outliers, aes(x = gdp_per_capita, y = suicide_per_100k, col = continent)) + \\n geom_point() + \\n geom_smooth(method = \\\"lm\\\", aes(group = 1)) + \\n scale_x_continuous(labels=scales::dollar_format(prefix=\\\"$\\\"), breaks = seq(0, 70000, 10000)) + \\n labs(title = \\\"Correlation between GDP (per capita) and Suicides per 100k\\\", \\n subtitle = \\\"Plot with high CooksD countries removed (5/93 total)\\\",\\n x = \\\"GDP (per capita)\\\", \\n y = \\\"Suicides per 100k\\\", \\n col = \\\"Continent\\\") + \\n theme(legend.position = \\\"none\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"gdp_suicide_no_outliers <- model1 %>%\\n augment() %>%\\n arrange(desc(.cooksd)) %>%\\n filter(.cooksd < 4/nrow(.)) %>% # removes 5/93 countries\\n inner_join(country_mean_gdp, by = c(\\\"suicide_per_100k\\\", \\\"gdp_per_capita\\\")) %>%\\n select(country, continent, gdp_per_capita, suicide_per_100k)\\n\\n\",\n \"_____no_output_____\"\n ],\n [\n \"data_second <- read_csv(\\\"datasets/this_is_only_for_reference.csv\\\")\",\n \"Parsed with column specification:\\ncols(\\n country = \\u001b[31mcol_character()\\u001b[39m,\\n year = \\u001b[32mcol_double()\\u001b[39m,\\n sex = \\u001b[31mcol_character()\\u001b[39m,\\n age = \\u001b[31mcol_character()\\u001b[39m,\\n suicides_no = \\u001b[32mcol_double()\\u001b[39m,\\n population = \\u001b[32mcol_double()\\u001b[39m,\\n `suicides/100k pop` = \\u001b[32mcol_double()\\u001b[39m,\\n `country-year` = \\u001b[31mcol_character()\\u001b[39m,\\n `HDI for year` = \\u001b[32mcol_double()\\u001b[39m,\\n `gdp_for_year ($)` = \\u001b[32mcol_number()\\u001b[39m,\\n `gdp_per_capita ($)` = \\u001b[32mcol_double()\\u001b[39m,\\n generation = \\u001b[31mcol_character()\\u001b[39m\\n)\\n\"\n ],\n [\n \"sapply(data_second, function(x) mean(is.na(df)))\",\n \"Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”Warning message in is.na(df):\\n“is.na() applied to non-(list or vector) of type 'closure'”\"\n ],\n [\n \"model1 <- lm(suicide_per_100k ~ gdp_per_capita, data = country_mean_gdp)\",\n \"_____no_output_____\"\n ],\n [\n \"gdp_suicide_no_outliers <- model1 %>%\\n augment() %>%\\n arrange(desc(.cooksd)) %>%\\n filter(.cooksd < 4/nrow(.)) %>% # removes 5/93 countries\\n inner_join(country_mean_gdp, by = c(\\\"suicide_per_100k\\\", \\\"gdp_per_capita\\\")) %>%\\n select(country, continent, gdp_per_capita, suicide_per_100k)\",\n \"_____no_output_____\"\n ],\n [\n \"model2 <- lm(suicide_per_100k ~ gdp_per_capita, data = gdp_suicide_no_outliers)\",\n \"_____no_output_____\"\n ],\n [\n \"summary(model2)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458820,"cells":{"hexsha":{"kind":"string","value":"e7e38cea1ccd510b490732b6c8c87962ecf25192"},"size":{"kind":"number","value":29512,"string":"29,512"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"2. Training and Detection.ipynb"},"max_stars_repo_name":{"kind":"string","value":"luchaoshi45/tensorflow_jupyter_cnn"},"max_stars_repo_head_hexsha":{"kind":"string","value":"342f8cd8dabc661aaa6ccd99b2c3c4a9ad12bdc3"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"2. Training and Detection.ipynb"},"max_issues_repo_name":{"kind":"string","value":"luchaoshi45/tensorflow_jupyter_cnn"},"max_issues_repo_head_hexsha":{"kind":"string","value":"342f8cd8dabc661aaa6ccd99b2c3c4a9ad12bdc3"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"2. Training and Detection.ipynb"},"max_forks_repo_name":{"kind":"string","value":"luchaoshi45/tensorflow_jupyter_cnn"},"max_forks_repo_head_hexsha":{"kind":"string","value":"342f8cd8dabc661aaa6ccd99b2c3c4a9ad12bdc3"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":26.9270072993,"string":"26.927007"},"max_line_length":{"kind":"number","value":398,"string":"398"},"alphanum_fraction":{"kind":"number","value":0.5507251288,"string":"0.550725"},"cells":{"kind":"list like","value":[[["# 0. Setup Paths","_____no_output_____"]],[["import os","_____no_output_____"],["CUSTOM_MODEL_NAME = 'my_ssd_mobnet' \nPRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'\nPRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'\nTF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'\nLABEL_MAP_NAME = 'label_map.pbtxt'","_____no_output_____"],["paths = {\n 'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),\n 'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),\n 'APIMODEL_PATH': os.path.join('Tensorflow','models'),\n 'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),\n 'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),\n 'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),\n 'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),\n 'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME), \n 'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'), \n 'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'), \n 'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'), \n 'PROTOC_PATH':os.path.join('Tensorflow','protoc')\n }","_____no_output_____"],["files = {\n 'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),\n 'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME), \n 'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)\n}","_____no_output_____"],["for path in paths.values():\n if not os.path.exists(path):\n if os.name == 'posix':\n !mkdir -p {path}\n if os.name == 'nt':\n !mkdir {path}","_____no_output_____"]],[["# 1. Download TF Models Pretrained Models from Tensorflow Model Zoo and Install TFOD","_____no_output_____"]],[["# https://www.tensorflow.org/install/source_windows","_____no_output_____"],["if os.name=='nt':\n !pip install wget\n import wget","_____no_output_____"],["if not os.path.exists(os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection')):\n !git clone https://github.com/tensorflow/models {paths['APIMODEL_PATH']}","_____no_output_____"],["# Install Tensorflow Object Detection \nif os.name=='posix': \n !apt-get install protobuf-compiler\n !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && cp object_detection/packages/tf2/setup.py . && python -m pip install . \n \nif os.name=='nt':\n url=\"https://github.com/protocolbuffers/protobuf/releases/download/v3.15.6/protoc-3.15.6-win64.zip\"\n wget.download(url)\n !move protoc-3.15.6-win64.zip {paths['PROTOC_PATH']}\n !cd {paths['PROTOC_PATH']} && tar -xf protoc-3.15.6-win64.zip\n os.environ['PATH'] += os.pathsep + os.path.abspath(os.path.join(paths['PROTOC_PATH'], 'bin')) \n !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && copy object_detection\\\\packages\\\\tf2\\\\setup.py setup.py && python setup.py build && python setup.py install\n !cd Tensorflow/models/research/slim && pip install -e . ","_____no_output_____"],["VERIFICATION_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'builders', 'model_builder_tf2_test.py')\n# Verify Installation\n!python {VERIFICATION_SCRIPT}","_____no_output_____"],["!pip install pyyaml\n!pip install tensorflow_io\n!pip install protobuf\n!pip install scipy\n!pip install pillow\n!pip install matplotlib\n!pip install pandas\n!pip install pycocotools","_____no_output_____"],["!pip install tensorflow --upgrade","_____no_output_____"],["!pip uninstall protobuf matplotlib -y\n!pip install protobuf matplotlib==3.2","_____no_output_____"],["import object_detection","_____no_output_____"],["if os.name =='posix':\n !wget {PRETRAINED_MODEL_URL}\n !mv {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}\n !cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}\nif os.name == 'nt':\n wget.download(PRETRAINED_MODEL_URL)\n !move {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}\n !cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}","_____no_output_____"]],[["# 2. Create Label Map","_____no_output_____"]],[["labels = [{'name':'stone', 'id':1}, {'name':'cloth', 'id':2}, {'name':'scissors', 'id':3}]\n\nwith open(files['LABELMAP'], 'w') as f:\n for label in labels:\n f.write('item { \\n')\n f.write('\\tname:\\'{}\\'\\n'.format(label['name']))\n f.write('\\tid:{}\\n'.format(label['id']))\n f.write('}\\n')","_____no_output_____"]],[["# 3. Create TF records","_____no_output_____"]],[["# OPTIONAL IF RUNNING ON COLAB\nARCHIVE_FILES = os.path.join(paths['IMAGE_PATH'], 'archive.tar.gz')\nif os.path.exists(ARCHIVE_FILES):\n !tar -zxvf {ARCHIVE_FILES}","_____no_output_____"],["if not os.path.exists(files['TF_RECORD_SCRIPT']):\n !git clone https://github.com/nicknochnack/GenerateTFRecord {paths['SCRIPTS_PATH']}","_____no_output_____"],["!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'train')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'train.record')} \n!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'test')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'test.record')} ","_____no_output_____"]],[["# 4. Copy Model Config to Training Folder","_____no_output_____"]],[["if os.name =='posix':\n !cp {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}\nif os.name == 'nt':\n !copy {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}","_____no_output_____"]],[["# 5. Update Config For Transfer Learning","_____no_output_____"]],[["import tensorflow as tf\nfrom object_detection.utils import config_util\nfrom object_detection.protos import pipeline_pb2\nfrom google.protobuf import text_format","_____no_output_____"],["config = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])","_____no_output_____"],["config","_____no_output_____"],["pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\nwith tf.io.gfile.GFile(files['PIPELINE_CONFIG'], \"r\") as f: \n proto_str = f.read() \n text_format.Merge(proto_str, pipeline_config) ","_____no_output_____"],["pipeline_config.model.ssd.num_classes = len(labels)\npipeline_config.train_config.batch_size = 4\npipeline_config.train_config.fine_tune_checkpoint = os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'checkpoint', 'ckpt-0')\npipeline_config.train_config.fine_tune_checkpoint_type = \"detection\"\npipeline_config.train_input_reader.label_map_path= files['LABELMAP']\npipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'train.record')]\npipeline_config.eval_input_reader[0].label_map_path = files['LABELMAP']\npipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'test.record')]","_____no_output_____"],["config_text = text_format.MessageToString(pipeline_config) \nwith tf.io.gfile.GFile(files['PIPELINE_CONFIG'], \"wb\") as f: \n f.write(config_text) ","_____no_output_____"]],[["# 6. Train the model","_____no_output_____"]],[["!pip install lvis\n!pip install gin\n!pip install gin-config\n!pip install tensorflow_addons","_____no_output_____"],["TRAINING_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'model_main_tf2.py')","_____no_output_____"],["command = \"python {} --model_dir={} --pipeline_config_path={} --num_train_steps=2000\".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'])","_____no_output_____"],["print(command)","_____no_output_____"],["#!{command}","_____no_output_____"]],[["# 7. Evaluate the Model","_____no_output_____"]],[["command = \"python {} --model_dir={} --pipeline_config_path={} --checkpoint_dir={}\".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'])","_____no_output_____"],["print(command)","_____no_output_____"],["#!{command}","_____no_output_____"]],[["# 8. Load Train Model From Checkpoint","_____no_output_____"]],[["import os\nimport tensorflow as tf\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\nfrom object_detection.utils import config_util","_____no_output_____"],["# Load pipeline config and build a detection model\nconfigs = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])\ndetection_model = model_builder.build(model_config=configs['model'], is_training=False)\n\n# Restore checkpoint\nckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\nckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-3')).expect_partial()\n\n@tf.function\ndef detect_fn(image):\n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections = detection_model.postprocess(prediction_dict, shapes)\n return detections","_____no_output_____"]],[["# 9. Detect from an Image","_____no_output_____"]],[["import cv2 \nimport numpy as np\nfrom matplotlib import pyplot as plt\n%matplotlib inline","_____no_output_____"],["category_index = label_map_util.create_category_index_from_labelmap(files['LABELMAP'])","_____no_output_____"],["IMAGE_PATH = os.path.join(paths['IMAGE_PATH'], 'test', 'scissors.ce01a4a7-a850-11ec-85bd-005056c00008.jpg')","_____no_output_____"],["img = cv2.imread(IMAGE_PATH)\nimage_np = np.array(img)\n\ninput_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\ndetections = detect_fn(input_tensor)\n\nnum_detections = int(detections.pop('num_detections'))\ndetections = {key: value[0, :num_detections].numpy()\n for key, value in detections.items()}\ndetections['num_detections'] = num_detections\n\n# detection_classes should be ints.\ndetections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n\nlabel_id_offset = 1\nimage_np_with_detections = image_np.copy()\n\nviz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_with_detections,\n detections['detection_boxes'],\n detections['detection_classes']+label_id_offset,\n detections['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=5,\n min_score_thresh=.8,\n agnostic_mode=False)\n\nplt.imshow(cv2.cvtColor(image_np_with_detections, cv2.COLOR_BGR2RGB))\nplt.show()","_____no_output_____"]],[["# 10. Real Time Detections from your Webcam","_____no_output_____"]],[["!pip uninstall opencv-python-headless -y","_____no_output_____"],["cap = cv2.VideoCapture(0)\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\nwhile cap.isOpened(): \n ret, frame = cap.read()\n frame = cv2.flip(frame,1,dst=None) #水平镜像\n image_np = np.array(frame)\n \n input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n detections = detect_fn(input_tensor)\n \n num_detections = int(detections.pop('num_detections'))\n detections = {key: value[0, :num_detections].numpy()\n for key, value in detections.items()}\n detections['num_detections'] = num_detections\n\n # detection_classes should be ints.\n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n\n label_id_offset = 1\n image_np_with_detections = image_np.copy()\n\n viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_with_detections,\n detections['detection_boxes'],\n detections['detection_classes']+label_id_offset,\n detections['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=5,\n min_score_thresh=.8,\n agnostic_mode=False)\n\n cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))\n \n if cv2.waitKey(10) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break","_____no_output_____"]],[["# 10. Freezing the Graph","_____no_output_____"]],[["FREEZE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'exporter_main_v2.py ')","_____no_output_____"],["FREEZE_SCRIPT","_____no_output_____"],["command = \"python {} --input_type=image_tensor --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}\".format(FREEZE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['OUTPUT_PATH'])","_____no_output_____"],["print(command)","_____no_output_____"],["!{command}","_____no_output_____"]],[["# 11. Conversion to TFJS","_____no_output_____"]],[["!pip install tensorflowjs","_____no_output_____"],["command = \"tensorflowjs_converter --input_format=tf_saved_model --output_node_names='detection_boxes,detection_classes,detection_features,detection_multiclass_scores,detection_scores,num_detections,raw_detection_boxes,raw_detection_scores' --output_format=tfjs_graph_model --signature_name=serving_default {} {}\".format(os.path.join(paths['OUTPUT_PATH'], 'saved_model'), paths['TFJS_PATH'])","_____no_output_____"],["print(command)","_____no_output_____"],["!{command}","_____no_output_____"],["# Test Code: https://github.com/nicknochnack/RealTimeSignLanguageDetectionwithTFJS","_____no_output_____"]],[["# 12. Conversion to TFLite","_____no_output_____"]],[["TFLITE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'export_tflite_graph_tf2.py ')","_____no_output_____"],["command = \"python {} --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}\".format(TFLITE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['TFLITE_PATH'])","_____no_output_____"],["print(command)","_____no_output_____"],["!{command}","_____no_output_____"],["FROZEN_TFLITE_PATH = os.path.join(paths['TFLITE_PATH'], 'saved_model')\nTFLITE_MODEL = os.path.join(paths['TFLITE_PATH'], 'saved_model', 'detect.tflite')","_____no_output_____"],["command = \"tflite_convert \\\n--saved_model_dir={} \\\n--output_file={} \\\n--input_shapes=1,300,300,3 \\\n--input_arrays=normalized_input_image_tensor \\\n--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \\\n--inference_type=FLOAT \\\n--allow_custom_ops\".format(FROZEN_TFLITE_PATH, TFLITE_MODEL, )","_____no_output_____"],["print(command)","_____no_output_____"],["!{command}","_____no_output_____"]],[["# 13. Zip and Export Models ","_____no_output_____"]],[["!tar -czf models.tar.gz {paths['CHECKPOINT_PATH']}","_____no_output_____"],["from google.colab import drive\ndrive.mount('/content/drive')","_____no_output_____"]]],"string":"[\n [\n [\n \"# 0. Setup Paths\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\",\n \"_____no_output_____\"\n ],\n [\n \"CUSTOM_MODEL_NAME = 'my_ssd_mobnet' \\nPRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'\\nPRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'\\nTF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'\\nLABEL_MAP_NAME = 'label_map.pbtxt'\",\n \"_____no_output_____\"\n ],\n [\n \"paths = {\\n 'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),\\n 'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),\\n 'APIMODEL_PATH': os.path.join('Tensorflow','models'),\\n 'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),\\n 'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),\\n 'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),\\n 'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),\\n 'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME), \\n 'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'), \\n 'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'), \\n 'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'), \\n 'PROTOC_PATH':os.path.join('Tensorflow','protoc')\\n }\",\n \"_____no_output_____\"\n ],\n [\n \"files = {\\n 'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),\\n 'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME), \\n 'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)\\n}\",\n \"_____no_output_____\"\n ],\n [\n \"for path in paths.values():\\n if not os.path.exists(path):\\n if os.name == 'posix':\\n !mkdir -p {path}\\n if os.name == 'nt':\\n !mkdir {path}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 1. Download TF Models Pretrained Models from Tensorflow Model Zoo and Install TFOD\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# https://www.tensorflow.org/install/source_windows\",\n \"_____no_output_____\"\n ],\n [\n \"if os.name=='nt':\\n !pip install wget\\n import wget\",\n \"_____no_output_____\"\n ],\n [\n \"if not os.path.exists(os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection')):\\n !git clone https://github.com/tensorflow/models {paths['APIMODEL_PATH']}\",\n \"_____no_output_____\"\n ],\n [\n \"# Install Tensorflow Object Detection \\nif os.name=='posix': \\n !apt-get install protobuf-compiler\\n !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && cp object_detection/packages/tf2/setup.py . && python -m pip install . \\n \\nif os.name=='nt':\\n url=\\\"https://github.com/protocolbuffers/protobuf/releases/download/v3.15.6/protoc-3.15.6-win64.zip\\\"\\n wget.download(url)\\n !move protoc-3.15.6-win64.zip {paths['PROTOC_PATH']}\\n !cd {paths['PROTOC_PATH']} && tar -xf protoc-3.15.6-win64.zip\\n os.environ['PATH'] += os.pathsep + os.path.abspath(os.path.join(paths['PROTOC_PATH'], 'bin')) \\n !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && copy object_detection\\\\\\\\packages\\\\\\\\tf2\\\\\\\\setup.py setup.py && python setup.py build && python setup.py install\\n !cd Tensorflow/models/research/slim && pip install -e . \",\n \"_____no_output_____\"\n ],\n [\n \"VERIFICATION_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'builders', 'model_builder_tf2_test.py')\\n# Verify Installation\\n!python {VERIFICATION_SCRIPT}\",\n \"_____no_output_____\"\n ],\n [\n \"!pip install pyyaml\\n!pip install tensorflow_io\\n!pip install protobuf\\n!pip install scipy\\n!pip install pillow\\n!pip install matplotlib\\n!pip install pandas\\n!pip install pycocotools\",\n \"_____no_output_____\"\n ],\n [\n \"!pip install tensorflow --upgrade\",\n \"_____no_output_____\"\n ],\n [\n \"!pip uninstall protobuf matplotlib -y\\n!pip install protobuf matplotlib==3.2\",\n \"_____no_output_____\"\n ],\n [\n \"import object_detection\",\n \"_____no_output_____\"\n ],\n [\n \"if os.name =='posix':\\n !wget {PRETRAINED_MODEL_URL}\\n !mv {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}\\n !cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}\\nif os.name == 'nt':\\n wget.download(PRETRAINED_MODEL_URL)\\n !move {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}\\n !cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 2. Create Label Map\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"labels = [{'name':'stone', 'id':1}, {'name':'cloth', 'id':2}, {'name':'scissors', 'id':3}]\\n\\nwith open(files['LABELMAP'], 'w') as f:\\n for label in labels:\\n f.write('item { \\\\n')\\n f.write('\\\\tname:\\\\'{}\\\\'\\\\n'.format(label['name']))\\n f.write('\\\\tid:{}\\\\n'.format(label['id']))\\n f.write('}\\\\n')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 3. Create TF records\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# OPTIONAL IF RUNNING ON COLAB\\nARCHIVE_FILES = os.path.join(paths['IMAGE_PATH'], 'archive.tar.gz')\\nif os.path.exists(ARCHIVE_FILES):\\n !tar -zxvf {ARCHIVE_FILES}\",\n \"_____no_output_____\"\n ],\n [\n \"if not os.path.exists(files['TF_RECORD_SCRIPT']):\\n !git clone https://github.com/nicknochnack/GenerateTFRecord {paths['SCRIPTS_PATH']}\",\n \"_____no_output_____\"\n ],\n [\n \"!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'train')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'train.record')} \\n!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'test')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'test.record')} \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 4. Copy Model Config to Training Folder\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"if os.name =='posix':\\n !cp {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}\\nif os.name == 'nt':\\n !copy {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 5. Update Config For Transfer Learning\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import tensorflow as tf\\nfrom object_detection.utils import config_util\\nfrom object_detection.protos import pipeline_pb2\\nfrom google.protobuf import text_format\",\n \"_____no_output_____\"\n ],\n [\n \"config = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])\",\n \"_____no_output_____\"\n ],\n [\n \"config\",\n \"_____no_output_____\"\n ],\n [\n \"pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\\nwith tf.io.gfile.GFile(files['PIPELINE_CONFIG'], \\\"r\\\") as f: \\n proto_str = f.read() \\n text_format.Merge(proto_str, pipeline_config) \",\n \"_____no_output_____\"\n ],\n [\n \"pipeline_config.model.ssd.num_classes = len(labels)\\npipeline_config.train_config.batch_size = 4\\npipeline_config.train_config.fine_tune_checkpoint = os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'checkpoint', 'ckpt-0')\\npipeline_config.train_config.fine_tune_checkpoint_type = \\\"detection\\\"\\npipeline_config.train_input_reader.label_map_path= files['LABELMAP']\\npipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'train.record')]\\npipeline_config.eval_input_reader[0].label_map_path = files['LABELMAP']\\npipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'test.record')]\",\n \"_____no_output_____\"\n ],\n [\n \"config_text = text_format.MessageToString(pipeline_config) \\nwith tf.io.gfile.GFile(files['PIPELINE_CONFIG'], \\\"wb\\\") as f: \\n f.write(config_text) \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 6. Train the model\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!pip install lvis\\n!pip install gin\\n!pip install gin-config\\n!pip install tensorflow_addons\",\n \"_____no_output_____\"\n ],\n [\n \"TRAINING_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'model_main_tf2.py')\",\n \"_____no_output_____\"\n ],\n [\n \"command = \\\"python {} --model_dir={} --pipeline_config_path={} --num_train_steps=2000\\\".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'])\",\n \"_____no_output_____\"\n ],\n [\n \"print(command)\",\n \"_____no_output_____\"\n ],\n [\n \"#!{command}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 7. Evaluate the Model\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"command = \\\"python {} --model_dir={} --pipeline_config_path={} --checkpoint_dir={}\\\".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'])\",\n \"_____no_output_____\"\n ],\n [\n \"print(command)\",\n \"_____no_output_____\"\n ],\n [\n \"#!{command}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 8. Load Train Model From Checkpoint\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import os\\nimport tensorflow as tf\\nfrom object_detection.utils import label_map_util\\nfrom object_detection.utils import visualization_utils as viz_utils\\nfrom object_detection.builders import model_builder\\nfrom object_detection.utils import config_util\",\n \"_____no_output_____\"\n ],\n [\n \"# Load pipeline config and build a detection model\\nconfigs = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])\\ndetection_model = model_builder.build(model_config=configs['model'], is_training=False)\\n\\n# Restore checkpoint\\nckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\\nckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-3')).expect_partial()\\n\\n@tf.function\\ndef detect_fn(image):\\n image, shapes = detection_model.preprocess(image)\\n prediction_dict = detection_model.predict(image, shapes)\\n detections = detection_model.postprocess(prediction_dict, shapes)\\n return detections\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 9. Detect from an Image\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import cv2 \\nimport numpy as np\\nfrom matplotlib import pyplot as plt\\n%matplotlib inline\",\n \"_____no_output_____\"\n ],\n [\n \"category_index = label_map_util.create_category_index_from_labelmap(files['LABELMAP'])\",\n \"_____no_output_____\"\n ],\n [\n \"IMAGE_PATH = os.path.join(paths['IMAGE_PATH'], 'test', 'scissors.ce01a4a7-a850-11ec-85bd-005056c00008.jpg')\",\n \"_____no_output_____\"\n ],\n [\n \"img = cv2.imread(IMAGE_PATH)\\nimage_np = np.array(img)\\n\\ninput_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\\ndetections = detect_fn(input_tensor)\\n\\nnum_detections = int(detections.pop('num_detections'))\\ndetections = {key: value[0, :num_detections].numpy()\\n for key, value in detections.items()}\\ndetections['num_detections'] = num_detections\\n\\n# detection_classes should be ints.\\ndetections['detection_classes'] = detections['detection_classes'].astype(np.int64)\\n\\nlabel_id_offset = 1\\nimage_np_with_detections = image_np.copy()\\n\\nviz_utils.visualize_boxes_and_labels_on_image_array(\\n image_np_with_detections,\\n detections['detection_boxes'],\\n detections['detection_classes']+label_id_offset,\\n detections['detection_scores'],\\n category_index,\\n use_normalized_coordinates=True,\\n max_boxes_to_draw=5,\\n min_score_thresh=.8,\\n agnostic_mode=False)\\n\\nplt.imshow(cv2.cvtColor(image_np_with_detections, cv2.COLOR_BGR2RGB))\\nplt.show()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 10. Real Time Detections from your Webcam\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!pip uninstall opencv-python-headless -y\",\n \"_____no_output_____\"\n ],\n [\n \"cap = cv2.VideoCapture(0)\\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\\n\\nwhile cap.isOpened(): \\n ret, frame = cap.read()\\n frame = cv2.flip(frame,1,dst=None) #水平镜像\\n image_np = np.array(frame)\\n \\n input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\\n detections = detect_fn(input_tensor)\\n \\n num_detections = int(detections.pop('num_detections'))\\n detections = {key: value[0, :num_detections].numpy()\\n for key, value in detections.items()}\\n detections['num_detections'] = num_detections\\n\\n # detection_classes should be ints.\\n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\\n\\n label_id_offset = 1\\n image_np_with_detections = image_np.copy()\\n\\n viz_utils.visualize_boxes_and_labels_on_image_array(\\n image_np_with_detections,\\n detections['detection_boxes'],\\n detections['detection_classes']+label_id_offset,\\n detections['detection_scores'],\\n category_index,\\n use_normalized_coordinates=True,\\n max_boxes_to_draw=5,\\n min_score_thresh=.8,\\n agnostic_mode=False)\\n\\n cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))\\n \\n if cv2.waitKey(10) & 0xFF == ord('q'):\\n cap.release()\\n cv2.destroyAllWindows()\\n break\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 10. Freezing the Graph\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"FREEZE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'exporter_main_v2.py ')\",\n \"_____no_output_____\"\n ],\n [\n \"FREEZE_SCRIPT\",\n \"_____no_output_____\"\n ],\n [\n \"command = \\\"python {} --input_type=image_tensor --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}\\\".format(FREEZE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['OUTPUT_PATH'])\",\n \"_____no_output_____\"\n ],\n [\n \"print(command)\",\n \"_____no_output_____\"\n ],\n [\n \"!{command}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 11. Conversion to TFJS\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!pip install tensorflowjs\",\n \"_____no_output_____\"\n ],\n [\n \"command = \\\"tensorflowjs_converter --input_format=tf_saved_model --output_node_names='detection_boxes,detection_classes,detection_features,detection_multiclass_scores,detection_scores,num_detections,raw_detection_boxes,raw_detection_scores' --output_format=tfjs_graph_model --signature_name=serving_default {} {}\\\".format(os.path.join(paths['OUTPUT_PATH'], 'saved_model'), paths['TFJS_PATH'])\",\n \"_____no_output_____\"\n ],\n [\n \"print(command)\",\n \"_____no_output_____\"\n ],\n [\n \"!{command}\",\n \"_____no_output_____\"\n ],\n [\n \"# Test Code: https://github.com/nicknochnack/RealTimeSignLanguageDetectionwithTFJS\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 12. Conversion to TFLite\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"TFLITE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'export_tflite_graph_tf2.py ')\",\n \"_____no_output_____\"\n ],\n [\n \"command = \\\"python {} --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}\\\".format(TFLITE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['TFLITE_PATH'])\",\n \"_____no_output_____\"\n ],\n [\n \"print(command)\",\n \"_____no_output_____\"\n ],\n [\n \"!{command}\",\n \"_____no_output_____\"\n ],\n [\n \"FROZEN_TFLITE_PATH = os.path.join(paths['TFLITE_PATH'], 'saved_model')\\nTFLITE_MODEL = os.path.join(paths['TFLITE_PATH'], 'saved_model', 'detect.tflite')\",\n \"_____no_output_____\"\n ],\n [\n \"command = \\\"tflite_convert \\\\\\n--saved_model_dir={} \\\\\\n--output_file={} \\\\\\n--input_shapes=1,300,300,3 \\\\\\n--input_arrays=normalized_input_image_tensor \\\\\\n--output_arrays='TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3' \\\\\\n--inference_type=FLOAT \\\\\\n--allow_custom_ops\\\".format(FROZEN_TFLITE_PATH, TFLITE_MODEL, )\",\n \"_____no_output_____\"\n ],\n [\n \"print(command)\",\n \"_____no_output_____\"\n ],\n [\n \"!{command}\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 13. Zip and Export Models \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"!tar -czf models.tar.gz {paths['CHECKPOINT_PATH']}\",\n \"_____no_output_____\"\n ],\n [\n \"from google.colab import drive\\ndrive.mount('/content/drive')\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code","code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code"],["markdown"],["code","code","code","code","code","code"],["markdown"],["code","code","code","code","code"],["markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code"],["markdown"],["code","code"],["markdown"],["code","code","code","code","code"],["markdown"],["code","code","code","code","code"],["markdown"],["code","code","code","code","code","code","code","code"],["markdown"],["code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458821,"cells":{"hexsha":{"kind":"string","value":"e7e39ef8051b83883f86823c1a9e8cf66a2515f7"},"size":{"kind":"number","value":78531,"string":"78,531"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"notebooks/FlairLMTrain.ipynb"},"max_stars_repo_name":{"kind":"string","value":"randomunrandom/Emojify"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bb6591314fb158617a031f506364315d45dbed4c"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-04-25T15:38:32.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-05-11T19:36:39.000Z"},"max_issues_repo_path":{"kind":"string","value":"notebooks/FlairLMTrain.ipynb"},"max_issues_repo_name":{"kind":"string","value":"randomunrandom/Emojify"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bb6591314fb158617a031f506364315d45dbed4c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"notebooks/FlairLMTrain.ipynb"},"max_forks_repo_name":{"kind":"string","value":"randomunrandom/Emojify"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bb6591314fb158617a031f506364315d45dbed4c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":96.7130541872,"string":"96.713054"},"max_line_length":{"kind":"number","value":1099,"string":"1,099"},"alphanum_fraction":{"kind":"number","value":0.5595115305,"string":"0.559512"},"cells":{"kind":"list like","value":[[["from pathlib import Path\n\nfrom flair.data import Dictionary\nfrom flair.models import LanguageModel\nfrom flair.trainers.language_model_trainer import LanguageModelTrainer, TextCorpus","_____no_output_____"],["is_forward_lm = True\nhidden_dim = 1024\nn_layers = 1\nseq_len = 40\nbatch_size = 64\n\n# load the default character dictionary\ndictionary: Dictionary = Dictionary.load('chars')\n\n# get your corpus, process forward and at the character level\ncorpus = TextCorpus(\n Path('../data/demojized_coprus'),\n dictionary,\n is_forward_lm,\n character_level=True\n)","2019-05-12 21:25:43,827 https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models/common_characters not found in cache, downloading to /tmp/tmpzlvqw4ge\n"],["language_model = LanguageModel(\n dictionary,\n is_forward_lm,\n hidden_size=hidden_dim,\n nlayers=n_layers\n)","_____no_output_____"],["trainer = LanguageModelTrainer(language_model, corpus)","_____no_output_____"],["trainer.train(\n '../models/flair-lm',\n sequence_length=seq_len,\n mini_batch_size=batch_size,\n max_epochs=150\n)","2019-05-12 21:41:40,265 read text file with 450235 lines\n2019-05-12 21:41:40,271 read text file with 450476 lines\n2019-05-12 21:41:40,547 shuffled\n2019-05-12 21:41:40,564 shuffled\n2019-05-12 21:44:59,616 read text file with 450002 lines\n2019-05-12 21:44:59,897 shuffled\n2019-05-12 21:45:01,365 Sequence length is 40\n2019-05-12 21:45:01,380 read text file with 449863 lines\n2019-05-12 21:45:01,656 shuffled\n2019-05-12 21:45:01,932 Split 1\t - (21:45:01)\n2019-05-12 21:45:03,956 | split 1 / 30 | 100/11313 batches | ms/batch 20.21 | loss 3.83 | ppl 46.07\n2019-05-12 21:45:05,933 | split 1 / 30 | 200/11313 batches | ms/batch 19.75 | loss 2.99 | ppl 19.82\n2019-05-12 21:45:07,897 | split 1 / 30 | 300/11313 batches | ms/batch 19.63 | loss 2.67 | ppl 14.37\n2019-05-12 21:45:09,870 | split 1 / 30 | 400/11313 batches | ms/batch 19.71 | loss 2.42 | ppl 11.29\n2019-05-12 21:45:11,830 | split 1 / 30 | 500/11313 batches | ms/batch 19.59 | loss 2.26 | ppl 9.60\n2019-05-12 21:45:13,793 | split 1 / 30 | 600/11313 batches | ms/batch 19.61 | loss 2.14 | ppl 8.52\n2019-05-12 21:45:15,758 | split 1 / 30 | 700/11313 batches | ms/batch 19.63 | loss 2.07 | ppl 7.89\n2019-05-12 21:45:17,723 | split 1 / 30 | 800/11313 batches | ms/batch 19.63 | loss 2.02 | ppl 7.51\n2019-05-12 21:45:19,687 | split 1 / 30 | 900/11313 batches | ms/batch 19.62 | loss 1.96 | ppl 7.12\n2019-05-12 21:45:21,657 | split 1 / 30 | 1000/11313 batches | ms/batch 19.68 | loss 1.92 | ppl 6.82\n2019-05-12 21:45:23,622 | split 1 / 30 | 1100/11313 batches | ms/batch 19.63 | loss 1.89 | ppl 6.64\n2019-05-12 21:45:25,588 | split 1 / 30 | 1200/11313 batches | ms/batch 19.64 | loss 1.87 | ppl 6.46\n2019-05-12 21:45:27,553 | split 1 / 30 | 1300/11313 batches | ms/batch 19.63 | loss 1.83 | ppl 6.21\n2019-05-12 21:45:29,524 | split 1 / 30 | 1400/11313 batches | ms/batch 19.69 | loss 1.81 | ppl 6.12\n2019-05-12 21:45:31,487 | split 1 / 30 | 1500/11313 batches | ms/batch 19.62 | loss 1.80 | ppl 6.02\n2019-05-12 21:45:33,451 | split 1 / 30 | 1600/11313 batches | ms/batch 19.62 | loss 1.77 | ppl 5.86\n2019-05-12 21:45:35,414 | split 1 / 30 | 1700/11313 batches | ms/batch 19.62 | loss 1.75 | ppl 5.75\n2019-05-12 21:45:37,380 | split 1 / 30 | 1800/11313 batches | ms/batch 19.65 | loss 1.74 | ppl 5.72\n2019-05-12 21:45:39,342 | split 1 / 30 | 1900/11313 batches | ms/batch 19.61 | loss 1.73 | ppl 5.66\n2019-05-12 21:45:41,308 | split 1 / 30 | 2000/11313 batches | ms/batch 19.64 | loss 1.72 | ppl 5.56\n2019-05-12 21:45:43,272 | split 1 / 30 | 2100/11313 batches | ms/batch 19.63 | loss 1.70 | ppl 5.47\n2019-05-12 21:45:45,234 | split 1 / 30 | 2200/11313 batches | ms/batch 19.60 | loss 1.69 | ppl 5.43\n2019-05-12 21:45:47,198 | split 1 / 30 | 2300/11313 batches | ms/batch 19.62 | loss 1.68 | ppl 5.37\n2019-05-12 21:45:49,159 | split 1 / 30 | 2400/11313 batches | ms/batch 19.60 | loss 1.68 | ppl 5.34\n2019-05-12 21:45:51,123 | split 1 / 30 | 2500/11313 batches | ms/batch 19.62 | loss 1.67 | ppl 5.30\n2019-05-12 21:45:53,092 | split 1 / 30 | 2600/11313 batches | ms/batch 19.67 | loss 1.66 | ppl 5.27\n2019-05-12 21:45:55,057 | split 1 / 30 | 2700/11313 batches | ms/batch 19.63 | loss 1.65 | ppl 5.22\n2019-05-12 21:45:57,026 | split 1 / 30 | 2800/11313 batches | ms/batch 19.68 | loss 1.63 | ppl 5.10\n2019-05-12 21:45:58,988 | split 1 / 30 | 2900/11313 batches | ms/batch 19.61 | loss 1.64 | ppl 5.13\n2019-05-12 21:46:00,951 | split 1 / 30 | 3000/11313 batches | ms/batch 19.62 | loss 1.65 | ppl 5.19\n2019-05-12 21:46:02,917 | split 1 / 30 | 3100/11313 batches | ms/batch 19.63 | loss 1.62 | ppl 5.06\n2019-05-12 21:46:04,872 | split 1 / 30 | 3200/11313 batches | ms/batch 19.54 | loss 1.63 | ppl 5.12\n2019-05-12 21:46:06,831 | split 1 / 30 | 3300/11313 batches | ms/batch 19.57 | loss 1.61 | ppl 5.00\n2019-05-12 21:46:08,799 | split 1 / 30 | 3400/11313 batches | ms/batch 19.67 | loss 1.61 | ppl 4.99\n2019-05-12 21:46:10,764 | split 1 / 30 | 3500/11313 batches | ms/batch 19.63 | loss 1.61 | ppl 5.00\n2019-05-12 21:46:12,727 | split 1 / 30 | 3600/11313 batches | ms/batch 19.61 | loss 1.61 | ppl 4.99\n2019-05-12 21:46:14,693 | split 1 / 30 | 3700/11313 batches | ms/batch 19.64 | loss 1.60 | ppl 4.97\n2019-05-12 21:46:16,664 | split 1 / 30 | 3800/11313 batches | ms/batch 19.69 | loss 1.60 | ppl 4.98\n2019-05-12 21:46:18,628 | split 1 / 30 | 3900/11313 batches | ms/batch 19.63 | loss 1.59 | ppl 4.92\n2019-05-12 21:46:20,599 | split 1 / 30 | 4000/11313 batches | ms/batch 19.70 | loss 1.59 | ppl 4.91\n2019-05-12 21:46:22,559 | split 1 / 30 | 4100/11313 batches | ms/batch 19.58 | loss 1.59 | ppl 4.90\n2019-05-12 21:46:24,527 | split 1 / 30 | 4200/11313 batches | ms/batch 19.66 | loss 1.57 | ppl 4.82\n2019-05-12 21:46:26,491 | split 1 / 30 | 4300/11313 batches | ms/batch 19.63 | loss 1.58 | ppl 4.87\n2019-05-12 21:46:28,459 | split 1 / 30 | 4400/11313 batches | ms/batch 19.67 | loss 1.57 | ppl 4.81\n2019-05-12 21:46:30,420 | split 1 / 30 | 4500/11313 batches | ms/batch 19.59 | loss 1.57 | ppl 4.80\n2019-05-12 21:46:32,404 | split 1 / 30 | 4600/11313 batches | ms/batch 19.83 | loss 1.56 | ppl 4.77\n2019-05-12 21:46:34,372 | split 1 / 30 | 4700/11313 batches | ms/batch 19.66 | loss 1.56 | ppl 4.75\n2019-05-12 21:46:36,338 | split 1 / 30 | 4800/11313 batches | ms/batch 19.64 | loss 1.55 | ppl 4.73\n2019-05-12 21:46:38,303 | split 1 / 30 | 4900/11313 batches | ms/batch 19.64 | loss 1.56 | ppl 4.74\n2019-05-12 21:46:40,272 | split 1 / 30 | 5000/11313 batches | ms/batch 19.67 | loss 1.55 | ppl 4.73\n2019-05-12 21:46:42,241 | split 1 / 30 | 5100/11313 batches | ms/batch 19.67 | loss 1.55 | ppl 4.71\n2019-05-12 21:46:44,206 | split 1 / 30 | 5200/11313 batches | ms/batch 19.63 | loss 1.55 | ppl 4.70\n2019-05-12 21:46:46,173 | split 1 / 30 | 5300/11313 batches | ms/batch 19.65 | loss 1.56 | ppl 4.74\n2019-05-12 21:46:48,144 | split 1 / 30 | 5400/11313 batches | ms/batch 19.70 | loss 1.55 | ppl 4.73\n2019-05-12 21:46:50,111 | split 1 / 30 | 5500/11313 batches | ms/batch 19.66 | loss 1.53 | ppl 4.64\n2019-05-12 21:46:52,080 | split 1 / 30 | 5600/11313 batches | ms/batch 19.67 | loss 1.53 | ppl 4.61\n2019-05-12 21:46:54,049 | split 1 / 30 | 5700/11313 batches | ms/batch 19.67 | loss 1.54 | ppl 4.65\n2019-05-12 21:46:56,015 | split 1 / 30 | 5800/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.64\n2019-05-12 21:46:57,982 | split 1 / 30 | 5900/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.64\n2019-05-12 21:46:59,948 | split 1 / 30 | 6000/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.63\n2019-05-12 21:47:01,918 | split 1 / 30 | 6100/11313 batches | ms/batch 19.68 | loss 1.53 | ppl 4.62\n2019-05-12 21:47:03,885 | split 1 / 30 | 6200/11313 batches | ms/batch 19.65 | loss 1.54 | ppl 4.65\n2019-05-12 21:47:05,859 | split 1 / 30 | 6300/11313 batches | ms/batch 19.73 | loss 1.52 | ppl 4.59\n2019-05-12 21:47:07,826 | split 1 / 30 | 6400/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.64\n2019-05-12 21:47:09,804 | split 1 / 30 | 6500/11313 batches | ms/batch 19.77 | loss 1.52 | ppl 4.56\n2019-05-12 21:47:11,774 | split 1 / 30 | 6600/11313 batches | ms/batch 19.69 | loss 1.52 | ppl 4.60\n2019-05-12 21:47:13,740 | split 1 / 30 | 6700/11313 batches | ms/batch 19.63 | loss 1.51 | ppl 4.55\n2019-05-12 21:47:15,704 | split 1 / 30 | 6800/11313 batches | ms/batch 19.63 | loss 1.52 | ppl 4.55\n2019-05-12 21:47:17,667 | split 1 / 30 | 6900/11313 batches | ms/batch 19.61 | loss 1.52 | ppl 4.56\n2019-05-12 21:47:19,633 | split 1 / 30 | 7000/11313 batches | ms/batch 19.64 | loss 1.52 | ppl 4.58\n2019-05-12 21:47:21,600 | split 1 / 30 | 7100/11313 batches | ms/batch 19.65 | loss 1.52 | ppl 4.56\n2019-05-12 21:47:23,565 | split 1 / 30 | 7200/11313 batches | ms/batch 19.63 | loss 1.51 | ppl 4.51\n"]]],"string":"[\n [\n [\n \"from pathlib import Path\\n\\nfrom flair.data import Dictionary\\nfrom flair.models import LanguageModel\\nfrom flair.trainers.language_model_trainer import LanguageModelTrainer, TextCorpus\",\n \"_____no_output_____\"\n ],\n [\n \"is_forward_lm = True\\nhidden_dim = 1024\\nn_layers = 1\\nseq_len = 40\\nbatch_size = 64\\n\\n# load the default character dictionary\\ndictionary: Dictionary = Dictionary.load('chars')\\n\\n# get your corpus, process forward and at the character level\\ncorpus = TextCorpus(\\n Path('../data/demojized_coprus'),\\n dictionary,\\n is_forward_lm,\\n character_level=True\\n)\",\n \"2019-05-12 21:25:43,827 https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models/common_characters not found in cache, downloading to /tmp/tmpzlvqw4ge\\n\"\n ],\n [\n \"language_model = LanguageModel(\\n dictionary,\\n is_forward_lm,\\n hidden_size=hidden_dim,\\n nlayers=n_layers\\n)\",\n \"_____no_output_____\"\n ],\n [\n \"trainer = LanguageModelTrainer(language_model, corpus)\",\n \"_____no_output_____\"\n ],\n [\n \"trainer.train(\\n '../models/flair-lm',\\n sequence_length=seq_len,\\n mini_batch_size=batch_size,\\n max_epochs=150\\n)\",\n \"2019-05-12 21:41:40,265 read text file with 450235 lines\\n2019-05-12 21:41:40,271 read text file with 450476 lines\\n2019-05-12 21:41:40,547 shuffled\\n2019-05-12 21:41:40,564 shuffled\\n2019-05-12 21:44:59,616 read text file with 450002 lines\\n2019-05-12 21:44:59,897 shuffled\\n2019-05-12 21:45:01,365 Sequence length is 40\\n2019-05-12 21:45:01,380 read text file with 449863 lines\\n2019-05-12 21:45:01,656 shuffled\\n2019-05-12 21:45:01,932 Split 1\\t - (21:45:01)\\n2019-05-12 21:45:03,956 | split 1 / 30 | 100/11313 batches | ms/batch 20.21 | loss 3.83 | ppl 46.07\\n2019-05-12 21:45:05,933 | split 1 / 30 | 200/11313 batches | ms/batch 19.75 | loss 2.99 | ppl 19.82\\n2019-05-12 21:45:07,897 | split 1 / 30 | 300/11313 batches | ms/batch 19.63 | loss 2.67 | ppl 14.37\\n2019-05-12 21:45:09,870 | split 1 / 30 | 400/11313 batches | ms/batch 19.71 | loss 2.42 | ppl 11.29\\n2019-05-12 21:45:11,830 | split 1 / 30 | 500/11313 batches | ms/batch 19.59 | loss 2.26 | ppl 9.60\\n2019-05-12 21:45:13,793 | split 1 / 30 | 600/11313 batches | ms/batch 19.61 | loss 2.14 | ppl 8.52\\n2019-05-12 21:45:15,758 | split 1 / 30 | 700/11313 batches | ms/batch 19.63 | loss 2.07 | ppl 7.89\\n2019-05-12 21:45:17,723 | split 1 / 30 | 800/11313 batches | ms/batch 19.63 | loss 2.02 | ppl 7.51\\n2019-05-12 21:45:19,687 | split 1 / 30 | 900/11313 batches | ms/batch 19.62 | loss 1.96 | ppl 7.12\\n2019-05-12 21:45:21,657 | split 1 / 30 | 1000/11313 batches | ms/batch 19.68 | loss 1.92 | ppl 6.82\\n2019-05-12 21:45:23,622 | split 1 / 30 | 1100/11313 batches | ms/batch 19.63 | loss 1.89 | ppl 6.64\\n2019-05-12 21:45:25,588 | split 1 / 30 | 1200/11313 batches | ms/batch 19.64 | loss 1.87 | ppl 6.46\\n2019-05-12 21:45:27,553 | split 1 / 30 | 1300/11313 batches | ms/batch 19.63 | loss 1.83 | ppl 6.21\\n2019-05-12 21:45:29,524 | split 1 / 30 | 1400/11313 batches | ms/batch 19.69 | loss 1.81 | ppl 6.12\\n2019-05-12 21:45:31,487 | split 1 / 30 | 1500/11313 batches | ms/batch 19.62 | loss 1.80 | ppl 6.02\\n2019-05-12 21:45:33,451 | split 1 / 30 | 1600/11313 batches | ms/batch 19.62 | loss 1.77 | ppl 5.86\\n2019-05-12 21:45:35,414 | split 1 / 30 | 1700/11313 batches | ms/batch 19.62 | loss 1.75 | ppl 5.75\\n2019-05-12 21:45:37,380 | split 1 / 30 | 1800/11313 batches | ms/batch 19.65 | loss 1.74 | ppl 5.72\\n2019-05-12 21:45:39,342 | split 1 / 30 | 1900/11313 batches | ms/batch 19.61 | loss 1.73 | ppl 5.66\\n2019-05-12 21:45:41,308 | split 1 / 30 | 2000/11313 batches | ms/batch 19.64 | loss 1.72 | ppl 5.56\\n2019-05-12 21:45:43,272 | split 1 / 30 | 2100/11313 batches | ms/batch 19.63 | loss 1.70 | ppl 5.47\\n2019-05-12 21:45:45,234 | split 1 / 30 | 2200/11313 batches | ms/batch 19.60 | loss 1.69 | ppl 5.43\\n2019-05-12 21:45:47,198 | split 1 / 30 | 2300/11313 batches | ms/batch 19.62 | loss 1.68 | ppl 5.37\\n2019-05-12 21:45:49,159 | split 1 / 30 | 2400/11313 batches | ms/batch 19.60 | loss 1.68 | ppl 5.34\\n2019-05-12 21:45:51,123 | split 1 / 30 | 2500/11313 batches | ms/batch 19.62 | loss 1.67 | ppl 5.30\\n2019-05-12 21:45:53,092 | split 1 / 30 | 2600/11313 batches | ms/batch 19.67 | loss 1.66 | ppl 5.27\\n2019-05-12 21:45:55,057 | split 1 / 30 | 2700/11313 batches | ms/batch 19.63 | loss 1.65 | ppl 5.22\\n2019-05-12 21:45:57,026 | split 1 / 30 | 2800/11313 batches | ms/batch 19.68 | loss 1.63 | ppl 5.10\\n2019-05-12 21:45:58,988 | split 1 / 30 | 2900/11313 batches | ms/batch 19.61 | loss 1.64 | ppl 5.13\\n2019-05-12 21:46:00,951 | split 1 / 30 | 3000/11313 batches | ms/batch 19.62 | loss 1.65 | ppl 5.19\\n2019-05-12 21:46:02,917 | split 1 / 30 | 3100/11313 batches | ms/batch 19.63 | loss 1.62 | ppl 5.06\\n2019-05-12 21:46:04,872 | split 1 / 30 | 3200/11313 batches | ms/batch 19.54 | loss 1.63 | ppl 5.12\\n2019-05-12 21:46:06,831 | split 1 / 30 | 3300/11313 batches | ms/batch 19.57 | loss 1.61 | ppl 5.00\\n2019-05-12 21:46:08,799 | split 1 / 30 | 3400/11313 batches | ms/batch 19.67 | loss 1.61 | ppl 4.99\\n2019-05-12 21:46:10,764 | split 1 / 30 | 3500/11313 batches | ms/batch 19.63 | loss 1.61 | ppl 5.00\\n2019-05-12 21:46:12,727 | split 1 / 30 | 3600/11313 batches | ms/batch 19.61 | loss 1.61 | ppl 4.99\\n2019-05-12 21:46:14,693 | split 1 / 30 | 3700/11313 batches | ms/batch 19.64 | loss 1.60 | ppl 4.97\\n2019-05-12 21:46:16,664 | split 1 / 30 | 3800/11313 batches | ms/batch 19.69 | loss 1.60 | ppl 4.98\\n2019-05-12 21:46:18,628 | split 1 / 30 | 3900/11313 batches | ms/batch 19.63 | loss 1.59 | ppl 4.92\\n2019-05-12 21:46:20,599 | split 1 / 30 | 4000/11313 batches | ms/batch 19.70 | loss 1.59 | ppl 4.91\\n2019-05-12 21:46:22,559 | split 1 / 30 | 4100/11313 batches | ms/batch 19.58 | loss 1.59 | ppl 4.90\\n2019-05-12 21:46:24,527 | split 1 / 30 | 4200/11313 batches | ms/batch 19.66 | loss 1.57 | ppl 4.82\\n2019-05-12 21:46:26,491 | split 1 / 30 | 4300/11313 batches | ms/batch 19.63 | loss 1.58 | ppl 4.87\\n2019-05-12 21:46:28,459 | split 1 / 30 | 4400/11313 batches | ms/batch 19.67 | loss 1.57 | ppl 4.81\\n2019-05-12 21:46:30,420 | split 1 / 30 | 4500/11313 batches | ms/batch 19.59 | loss 1.57 | ppl 4.80\\n2019-05-12 21:46:32,404 | split 1 / 30 | 4600/11313 batches | ms/batch 19.83 | loss 1.56 | ppl 4.77\\n2019-05-12 21:46:34,372 | split 1 / 30 | 4700/11313 batches | ms/batch 19.66 | loss 1.56 | ppl 4.75\\n2019-05-12 21:46:36,338 | split 1 / 30 | 4800/11313 batches | ms/batch 19.64 | loss 1.55 | ppl 4.73\\n2019-05-12 21:46:38,303 | split 1 / 30 | 4900/11313 batches | ms/batch 19.64 | loss 1.56 | ppl 4.74\\n2019-05-12 21:46:40,272 | split 1 / 30 | 5000/11313 batches | ms/batch 19.67 | loss 1.55 | ppl 4.73\\n2019-05-12 21:46:42,241 | split 1 / 30 | 5100/11313 batches | ms/batch 19.67 | loss 1.55 | ppl 4.71\\n2019-05-12 21:46:44,206 | split 1 / 30 | 5200/11313 batches | ms/batch 19.63 | loss 1.55 | ppl 4.70\\n2019-05-12 21:46:46,173 | split 1 / 30 | 5300/11313 batches | ms/batch 19.65 | loss 1.56 | ppl 4.74\\n2019-05-12 21:46:48,144 | split 1 / 30 | 5400/11313 batches | ms/batch 19.70 | loss 1.55 | ppl 4.73\\n2019-05-12 21:46:50,111 | split 1 / 30 | 5500/11313 batches | ms/batch 19.66 | loss 1.53 | ppl 4.64\\n2019-05-12 21:46:52,080 | split 1 / 30 | 5600/11313 batches | ms/batch 19.67 | loss 1.53 | ppl 4.61\\n2019-05-12 21:46:54,049 | split 1 / 30 | 5700/11313 batches | ms/batch 19.67 | loss 1.54 | ppl 4.65\\n2019-05-12 21:46:56,015 | split 1 / 30 | 5800/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.64\\n2019-05-12 21:46:57,982 | split 1 / 30 | 5900/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.64\\n2019-05-12 21:46:59,948 | split 1 / 30 | 6000/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.63\\n2019-05-12 21:47:01,918 | split 1 / 30 | 6100/11313 batches | ms/batch 19.68 | loss 1.53 | ppl 4.62\\n2019-05-12 21:47:03,885 | split 1 / 30 | 6200/11313 batches | ms/batch 19.65 | loss 1.54 | ppl 4.65\\n2019-05-12 21:47:05,859 | split 1 / 30 | 6300/11313 batches | ms/batch 19.73 | loss 1.52 | ppl 4.59\\n2019-05-12 21:47:07,826 | split 1 / 30 | 6400/11313 batches | ms/batch 19.65 | loss 1.53 | ppl 4.64\\n2019-05-12 21:47:09,804 | split 1 / 30 | 6500/11313 batches | ms/batch 19.77 | loss 1.52 | ppl 4.56\\n2019-05-12 21:47:11,774 | split 1 / 30 | 6600/11313 batches | ms/batch 19.69 | loss 1.52 | ppl 4.60\\n2019-05-12 21:47:13,740 | split 1 / 30 | 6700/11313 batches | ms/batch 19.63 | loss 1.51 | ppl 4.55\\n2019-05-12 21:47:15,704 | split 1 / 30 | 6800/11313 batches | ms/batch 19.63 | loss 1.52 | ppl 4.55\\n2019-05-12 21:47:17,667 | split 1 / 30 | 6900/11313 batches | ms/batch 19.61 | loss 1.52 | ppl 4.56\\n2019-05-12 21:47:19,633 | split 1 / 30 | 7000/11313 batches | ms/batch 19.64 | loss 1.52 | ppl 4.58\\n2019-05-12 21:47:21,600 | split 1 / 30 | 7100/11313 batches | ms/batch 19.65 | loss 1.52 | ppl 4.56\\n2019-05-12 21:47:23,565 | split 1 / 30 | 7200/11313 batches | ms/batch 19.63 | loss 1.51 | ppl 4.51\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code","code","code","code"]],"string":"[\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458822,"cells":{"hexsha":{"kind":"string","value":"e7e3a56f720223d5c70b47ec5ad154c15e257454"},"size":{"kind":"number","value":6654,"string":"6,654"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Code/day08.ipynb"},"max_stars_repo_name":{"kind":"string","value":"heibanke/learn_python_in_15days"},"max_stars_repo_head_hexsha":{"kind":"string","value":"96658b1b5cd1e532ba57897237cc89f0861e76c2"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Code/day08.ipynb"},"max_issues_repo_name":{"kind":"string","value":"heibanke/learn_python_in_15days"},"max_issues_repo_head_hexsha":{"kind":"string","value":"96658b1b5cd1e532ba57897237cc89f0861e76c2"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Code/day08.ipynb"},"max_forks_repo_name":{"kind":"string","value":"heibanke/learn_python_in_15days"},"max_forks_repo_head_hexsha":{"kind":"string","value":"96658b1b5cd1e532ba57897237cc89f0861e76c2"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":20.4110429448,"string":"20.411043"},"max_line_length":{"kind":"number","value":91,"string":"91"},"alphanum_fraction":{"kind":"number","value":0.4445446348,"string":"0.444545"},"cells":{"kind":"list like","value":[[["# 15天入门Python3\n\nCopyRight by 黑板客 \n转载请联系heibanke_at_aliyun.com","_____no_output_____"],["**上节作业**\n\n汉诺塔\n\n如何存储和操作数据?","_____no_output_____"]],[["%load day07/hnt.py\n","_____no_output_____"]],[["## day08:生成器—临阵磨枪\n\n1. 生成器\n2. itertools\n4. 作业——八皇后\n\n## 生成器\n\n生成器函数 \n 1) return关键词被yield取代 \n 2) 当调用这个“函数”的时候,它会立即返回一个迭代器,而不立即执行函数内容,直到调用其返回迭代器的next方法是才开始执行,直到遇到yield语句暂停。 \n 3) 继续调用生成器返回的迭代器的next方法,恢复函数执行,直到再次遇到yield语句 \n 4) 如此反复,一直到遇到StopIteration ","_____no_output_____"]],[["# 最简单的例子,产生0~N个整数\n\ndef irange(N):\n a = 0\n while a 1000:\n break\n else:\n print(i)","_____no_output_____"]],[["### 生成器表达式","_____no_output_____"]],[["a = (x**2 for x in range(10))\nnext(a)","_____no_output_____"],["%%timeit -n 1 -r 1\nsum([x**2 for x in range(10000000)])","_____no_output_____"],["%%timeit -n 1 -r 1\nsum(x**2 for x in range(10000000))","_____no_output_____"]],[["### send\n\n生成器可以修改遍历过程,插入指定的数据","_____no_output_____"]],[["def counter(maximum):\n i = 0\n while i < maximum:\n val = (yield i)\n print(\"i=%s, val=%s\"%(i, val))\n # If value provided, change counter\n if val is not None:\n i = val\n else:\n i += 1","_____no_output_____"],["it = counter(10)\nprint(\"yield value: %s\"%(next(it)))\n\nprint(\"yield value: %s\"%(next(it)))\n\nprint(it.send(5))\n\n# 想一下下个print(next(it))会输出什么?","_____no_output_____"]],[["## itertools\n\n1. chain # 将多个生成器串起来\n2. repeat # 重复元素\n3. permutations # 排列,从N个数里取m个,考虑顺序。\n4. combinations # 组合,从N个数里取m个,不考虑顺序。\n5. product # 依次从不同集合里任选一个数。笛卡尔乘积\n\n","_____no_output_____"]],[["import itertools\n\nhorses=[1,2,3,4]\nraces = itertools.permutations(horses,3) \n\na=itertools.product([1,2],[3,4],[5,6])\nb=itertools.repeat([1,2,3],4)\nc=itertools.combinations([1,2,3,4],3)\nd=itertools.chain(races, a, b, c)\n\nprint([i for i in races])\nprint(\"====================\")\nprint([i for i in a])\nprint(\"====================\")\n\nprint([i for i in b])\nprint(\"====================\")\n\nprint([i for i in c])\nprint(\"====================\")\n\nprint([i for i in d])\n","_____no_output_____"]],[["**作业:八皇后问题**\n\n8*8的棋盘上放下8个皇后,彼此吃不到对方。找出所有的位置组合。\n\n1. 棋盘的每一行,每一列,每一个条正斜线,每一条反斜线,都只能有1个皇后\n2. 使用生成器\n3. 支持N皇后\n\n","_____no_output_____"]],[["from day08.eight_queen import gen_n_queen, printsolution\n\nsolves = gen_n_queen(5)","_____no_output_____"],["s = next(solves)\nprint(s)\nprintsolution(s)","_____no_output_____"],["def printsolution(solve):\n n = len(solve)\n sep = \"+\" + \"-+\" * n\n print(sep)\n for i in range(n):\n squares = [\" \" for j in range(n)]\n squares[solve[i]] = \"Q\"\n print(\"|\" + \"|\".join(squares) + \"|\")\n print(sep)","_____no_output_____"]],[["提示:\n\n1. 产生所有的可能(先满足不在同一行,同一列)\n2. 判断是否满足对角条件\n3. 所有条件都满足,则yield输出\n4. next则继续检查剩余的可能\n5. 8皇后的解答有92种。","_____no_output_____"]]],"string":"[\n [\n [\n \"# 15天入门Python3\\n\\nCopyRight by 黑板客 \\n转载请联系heibanke_at_aliyun.com\",\n \"_____no_output_____\"\n ],\n [\n \"**上节作业**\\n\\n汉诺塔\\n\\n如何存储和操作数据?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%load day07/hnt.py\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## day08:生成器—临阵磨枪\\n\\n1. 生成器\\n2. itertools\\n4. 作业——八皇后\\n\\n## 生成器\\n\\n生成器函数 \\n 1) return关键词被yield取代 \\n 2) 当调用这个“函数”的时候,它会立即返回一个迭代器,而不立即执行函数内容,直到调用其返回迭代器的next方法是才开始执行,直到遇到yield语句暂停。 \\n 3) 继续调用生成器返回的迭代器的next方法,恢复函数执行,直到再次遇到yield语句 \\n 4) 如此反复,一直到遇到StopIteration \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# 最简单的例子,产生0~N个整数\\n\\ndef irange(N):\\n a = 0\\n while a 1000:\\n break\\n else:\\n print(i)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### 生成器表达式\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"a = (x**2 for x in range(10))\\nnext(a)\",\n \"_____no_output_____\"\n ],\n [\n \"%%timeit -n 1 -r 1\\nsum([x**2 for x in range(10000000)])\",\n \"_____no_output_____\"\n ],\n [\n \"%%timeit -n 1 -r 1\\nsum(x**2 for x in range(10000000))\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"### send\\n\\n生成器可以修改遍历过程,插入指定的数据\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def counter(maximum):\\n i = 0\\n while i < maximum:\\n val = (yield i)\\n print(\\\"i=%s, val=%s\\\"%(i, val))\\n # If value provided, change counter\\n if val is not None:\\n i = val\\n else:\\n i += 1\",\n \"_____no_output_____\"\n ],\n [\n \"it = counter(10)\\nprint(\\\"yield value: %s\\\"%(next(it)))\\n\\nprint(\\\"yield value: %s\\\"%(next(it)))\\n\\nprint(it.send(5))\\n\\n# 想一下下个print(next(it))会输出什么?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## itertools\\n\\n1. chain # 将多个生成器串起来\\n2. repeat # 重复元素\\n3. permutations # 排列,从N个数里取m个,考虑顺序。\\n4. combinations # 组合,从N个数里取m个,不考虑顺序。\\n5. product # 依次从不同集合里任选一个数。笛卡尔乘积\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import itertools\\n\\nhorses=[1,2,3,4]\\nraces = itertools.permutations(horses,3) \\n\\na=itertools.product([1,2],[3,4],[5,6])\\nb=itertools.repeat([1,2,3],4)\\nc=itertools.combinations([1,2,3,4],3)\\nd=itertools.chain(races, a, b, c)\\n\\nprint([i for i in races])\\nprint(\\\"====================\\\")\\nprint([i for i in a])\\nprint(\\\"====================\\\")\\n\\nprint([i for i in b])\\nprint(\\\"====================\\\")\\n\\nprint([i for i in c])\\nprint(\\\"====================\\\")\\n\\nprint([i for i in d])\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"**作业:八皇后问题**\\n\\n8*8的棋盘上放下8个皇后,彼此吃不到对方。找出所有的位置组合。\\n\\n1. 棋盘的每一行,每一列,每一个条正斜线,每一条反斜线,都只能有1个皇后\\n2. 使用生成器\\n3. 支持N皇后\\n\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from day08.eight_queen import gen_n_queen, printsolution\\n\\nsolves = gen_n_queen(5)\",\n \"_____no_output_____\"\n ],\n [\n \"s = next(solves)\\nprint(s)\\nprintsolution(s)\",\n \"_____no_output_____\"\n ],\n [\n \"def printsolution(solve):\\n n = len(solve)\\n sep = \\\"+\\\" + \\\"-+\\\" * n\\n print(sep)\\n for i in range(n):\\n squares = [\\\" \\\" for j in range(n)]\\n squares[solve[i]] = \\\"Q\\\"\\n print(\\\"|\\\" + \\\"|\\\".join(squares) + \\\"|\\\")\\n print(sep)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"提示:\\n\\n1. 产生所有的可能(先满足不在同一行,同一列)\\n2. 判断是否满足对角条件\\n3. 所有条件都满足,则yield输出\\n4. next则继续检查剩余的可能\\n5. 8皇后的解答有92种。\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown","markdown"],["code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"]],"string":"[\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458823,"cells":{"hexsha":{"kind":"string","value":"e7e3a7c5073cdef68be74018b6cae31e6fd16254"},"size":{"kind":"number","value":15972,"string":"15,972"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"savinov-vlad/hw1.ipynb"},"max_stars_repo_name":{"kind":"string","value":"dingearteom/co-mkn-hw-2021"},"max_stars_repo_head_hexsha":{"kind":"string","value":"58e918ee8ef7cfddac48ed8d5c2fd211f599c8a8"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"savinov-vlad/hw1.ipynb"},"max_issues_repo_name":{"kind":"string","value":"dingearteom/co-mkn-hw-2021"},"max_issues_repo_head_hexsha":{"kind":"string","value":"58e918ee8ef7cfddac48ed8d5c2fd211f599c8a8"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"savinov-vlad/hw1.ipynb"},"max_forks_repo_name":{"kind":"string","value":"dingearteom/co-mkn-hw-2021"},"max_forks_repo_head_hexsha":{"kind":"string","value":"58e918ee8ef7cfddac48ed8d5c2fd211f599c8a8"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":26.934232715,"string":"26.934233"},"max_line_length":{"kind":"number","value":137,"string":"137"},"alphanum_fraction":{"kind":"number","value":0.4576759329,"string":"0.457676"},"cells":{"kind":"list like","value":[[["## Вычислить $ \\sqrt[k]{a} $ ","_____no_output_____"]],[["import numpy as np\n\ndef printable_test(a, k, f, prc=1e-4):\n ans = f(a, k)\n print(f'Our result: {a}^(1/{k}) ~ {ans:.10f}')\n print(f'True result: {a**(1/k):.10f}\\n')\n print(f'Approx a ~ {ans**k:.10f}')\n print(f'True a = {a}')\n assert abs(a - ans**k) < prc, f'the answer differs by {abs(a - ans**k):.10f} from the true one'\n \ndef not_printable_test(a, k, f, prc=1e-4):\n ans = f(a, k)\n assert abs(a - ans**k) < prc, f'f({a}, {k}): the answer differs by {abs(a - ans**k):.10f} from the true one'\n \ndef test(func):\n rng = np.random.default_rng(12345)\n test_len = 1000\n vals = rng.integers(low=0, high=1000, size=test_len)\n pws = rng.integers(low=1, high=100, size=test_len)\n\n for a, k in zip(vals, pws):\n not_printable_test(a, k, func)\n \n print(f'All {test_len} tests have passed!')\n","_____no_output_____"],["def root_bisection(a: float, k: float, iters=1000) -> float:\n def f(x):\n return x**k - a\n\n assert k > 0, 'Negative `k` values are not allowed'\n\n l, r = 0, a\n for _ in range(iters):\n m = l + (r - l) / 2\n if f(m) * f(l) <= 0:\n r = m\n else:\n l = m\n return l + (r - l) / 2\n","_____no_output_____"],["test(root_bisection)","All 1000 tests have passed!\n"],["printable_test(1350, 12, root_bisection)\nprint('\\n')\nprintable_test(-1, 1, root_bisection)","Our result: 1350^(1/12) ~ 1.8233126596\nTrue result: 1.8233126596\n\nApprox a ~ 1350.0000000000\nTrue a = 1350\n\n\nOur result: -1^(1/1) ~ -1.0000000000\nTrue result: -1.0000000000\n\nApprox a ~ -1.0000000000\nTrue a = -1\n"],["def root_newton(a: float, k: float, iters=1000) -> float:\n def f(x):\n return x**k - a\n\n def dx(x):\n return k * x**(k - 1)\n\n assert k > 0, 'Negative `k` values are not allowed'\n \n x = 1\n for _ in range(iters):\n x = x - f(x) / dx(x)\n\n return x\n","_____no_output_____"],["test(root_newton)","All 1000 tests have passed!\n"],["printable_test(1350, 12, root_newton)\nprint('\\n')\nprintable_test(-1, 1, root_newton)","Our result: 1350^(1/12) ~ 1.8233126596\nTrue result: 1.8233126596\n\nApprox a ~ 1350.0000000000\nTrue a = 1350\n\n\nOur result: -1^(1/1) ~ -1.0000000000\nTrue result: -1.0000000000\n\nApprox a ~ -1.0000000000\nTrue a = -1\n"]],[["## Дан многочлен P степени не больше 5 и отрезок [L, R]","_____no_output_____"],["Локализовать корни: $ P(L_i) \\cdot P(R_i) <0 $ \n\nИ найти на каждом таком отрезке корни","_____no_output_____"]],[["from typing import List\nimport numpy as np\n\nclass Polynom:\n def __init__(self, coefs: List[float]):\n # self.coefs = [a0, a1, a2, ...., an]\n self.coefs = coefs\n\n def __str__(self):\n if not self.coefs:\n return ''\n descr = str(self.coefs[0])\n for i, coef in enumerate(self.coefs[1:]):\n sign = '+' if coef > 0 else '-'\n descr += f' {sign} {abs(coef)} * x^{i + 1}'\n return descr\n \n def __repr__(self):\n return self.__str__()\n\n def value_at(self, x: float) -> float:\n res = 0\n for i, coef in enumerate(self.coefs):\n res += x**i * coef\n return res\n \n def dx(self):\n if not self.coefs or len(self.coefs) == 1:\n return Polynom([0])\n return Polynom([(i + 1) * coef for i, coef in enumerate(self.coefs[1:])])\n \n def is_root(self, x: float, prc=1e-4) -> bool:\n return abs(0 - self.value_at(x)) < prc\n \n def root_segments(self, l: float, r: float, min_seg_len=1e-2) -> List[List[float]]:\n segs = []\n prev_end, cur_end = l, l + min_seg_len\n\n while cur_end < r:\n if self.value_at(prev_end) * self.value_at(cur_end) < 0:\n segs.append([prev_end, cur_end])\n prev_end = cur_end\n\n if self.value_at(cur_end) == 0:\n move = min_seg_len / 10\n segs.append([prev_end, cur_end + move])\n prev_end = cur_end + move\n\n cur_end += min_seg_len\n\n return segs\n \n def find_single_root(self, l: float, r: float, iters=1000) -> float:\n for _ in range(iters):\n m = l + (r - l) / 2\n if self.value_at(l) * self.value_at(m) < 0:\n r = m\n else:\n l = m\n return l + (r - l) / 2\n \n def find_roots(self, l: float, r: float) -> List[float]:\n roots = []\n segs = self.root_segments(l, r)\n for seg_l, seg_r in segs:\n roots.append(self.find_single_root(seg_l, seg_r))\n \n return roots\n \n def check_roots(self, roots: List[float]) -> bool:\n return np.all([self.is_root(x) for x in roots])\n \n def find_min(self, l: float, r: float) -> float:\n assert self.coefs, 'Polynom must contain at least one coef'\n\n if len(self.coefs) == 1:\n return self.coefs[0]\n\n pts = [l, *self.dx().find_roots(l, r), r]\n return min(self.value_at(pt) for pt in pts)\n","_____no_output_____"],["# x^3 + 97.93 x^2 - 229.209 x + 132.304\n# (x - 1.1) * (x - 1.2) * (x + 100.23)\np = Polynom([132.304, -229.209, 97.93, 1])\np.find_roots(-1000, 1000)","_____no_output_____"],["p.find_min(-10, 10)","_____no_output_____"],["Polynom([1, 2, 1]).find_roots(-1000, 1000)","_____no_output_____"],["Polynom([1, -2]).find_roots(-1000, 1000)","_____no_output_____"]],[["## Найти минимум функции $ e^{ax} + e^{-bx} + c(x - d)^2$","_____no_output_____"]],[["from numpy import exp\nfrom typing import Tuple\n\nclass ExpMinFinder:\n def __init__(self, a: float, b: float, c: float, d: float):\n if a <= 0 or b <= 0 or c <= 0:\n raise ValueError(\"Parameters must be non-negative\")\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n \n def f(self, x) -> float:\n return exp(self.a * x) + exp(-self.b * x) + self.c * (x - self.d)**2\n \n def dx(self, x) -> float:\n return self.a * exp(self.a * x) - self.b * exp(-self.b * x) + 2 * self.c * (x - self.d)\n \n def ddx(self, x) -> float:\n return self.a**2 * exp(self.a * x) + self.b**2 * exp(-self.b * x) + 2 * self.c\n \n def min_bisection(self, iters=1000) -> Tuple[float, float]:\n l, r = -100, 100\n for _ in range(iters):\n m = l + (r - l) / 2\n if self.dx(m) * self.dx(l) < 0:\n r = m\n else:\n l = m\n\n min_at = l + (r - l) / 2\n return min_at, self.f(min_at)\n \n def min_newton(self, iters=1000) -> Tuple[float, float]:\n x = 1\n for _ in range(iters):\n x = x - self.dx(x) / self.ddx(x)\n\n return x, self.f(x)\n\n def min_ternary(self, iters=1000) -> Tuple[float, float]:\n l, r = -100, 100\n \n for _ in range(iters):\n m1 = l + (r - l) / 3\n m2 = r - (r - l) / 3\n if self.f(m1) >= self.f(m2):\n l = m1\n else:\n r = m2\n min_at = l + (r - l) / 2\n\n return min_at, self.f(min_at)\n","_____no_output_____"],["def test_exp():\n rng = np.random.default_rng(12345)\n test_len = 100\n a_ = rng.integers(low=1, high=10, size=test_len)\n b_ = rng.integers(low=1, high=10, size=test_len)\n c_ = rng.integers(low=1, high=10, size=test_len)\n d_ = rng.integers(low=1, high=10, size=test_len)\n \n \n for a, b, c, d in zip(a_, b_, c_, d_):\n m = ExpMinFinder(a, b, c, d)\n \n assert abs(m.min_bisection()[1] - m.min_newton()[1]) < 1e-3, \\\n f'Results: {m.min_bisection():.3f} {m.min_newton():.3f}, values: {a, b, c, d}'\n assert abs(m.min_newton()[1] - m.min_ternary()[1]) < 1e-3, \\\n f'Results: {m.min_newton():.3f} {m.min_ternary():.3f}, values: {a, b, c, d}'\n \n print(f'All {test_len} tests have passed')\n \ntest_exp()","/var/folders/n7/k2zyw7490b97hfmqkg39c6080000gn/T/ipykernel_79092/1883086689.py:17: RuntimeWarning: overflow encountered in exp\n return self.a * exp(self.a * x) - self.b * exp(-self.b * x) + 2 * self.c * (x - self.d)\n"],["m = ExpMinFinder(1, 2, 3, 4)","_____no_output_____"],["m.min_bisection()","_____no_output_____"],["m.min_newton()","_____no_output_____"],["m.min_ternary()","_____no_output_____"]]],"string":"[\n [\n [\n \"## Вычислить $ \\\\sqrt[k]{a} $ \",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import numpy as np\\n\\ndef printable_test(a, k, f, prc=1e-4):\\n ans = f(a, k)\\n print(f'Our result: {a}^(1/{k}) ~ {ans:.10f}')\\n print(f'True result: {a**(1/k):.10f}\\\\n')\\n print(f'Approx a ~ {ans**k:.10f}')\\n print(f'True a = {a}')\\n assert abs(a - ans**k) < prc, f'the answer differs by {abs(a - ans**k):.10f} from the true one'\\n \\ndef not_printable_test(a, k, f, prc=1e-4):\\n ans = f(a, k)\\n assert abs(a - ans**k) < prc, f'f({a}, {k}): the answer differs by {abs(a - ans**k):.10f} from the true one'\\n \\ndef test(func):\\n rng = np.random.default_rng(12345)\\n test_len = 1000\\n vals = rng.integers(low=0, high=1000, size=test_len)\\n pws = rng.integers(low=1, high=100, size=test_len)\\n\\n for a, k in zip(vals, pws):\\n not_printable_test(a, k, func)\\n \\n print(f'All {test_len} tests have passed!')\\n\",\n \"_____no_output_____\"\n ],\n [\n \"def root_bisection(a: float, k: float, iters=1000) -> float:\\n def f(x):\\n return x**k - a\\n\\n assert k > 0, 'Negative `k` values are not allowed'\\n\\n l, r = 0, a\\n for _ in range(iters):\\n m = l + (r - l) / 2\\n if f(m) * f(l) <= 0:\\n r = m\\n else:\\n l = m\\n return l + (r - l) / 2\\n\",\n \"_____no_output_____\"\n ],\n [\n \"test(root_bisection)\",\n \"All 1000 tests have passed!\\n\"\n ],\n [\n \"printable_test(1350, 12, root_bisection)\\nprint('\\\\n')\\nprintable_test(-1, 1, root_bisection)\",\n \"Our result: 1350^(1/12) ~ 1.8233126596\\nTrue result: 1.8233126596\\n\\nApprox a ~ 1350.0000000000\\nTrue a = 1350\\n\\n\\nOur result: -1^(1/1) ~ -1.0000000000\\nTrue result: -1.0000000000\\n\\nApprox a ~ -1.0000000000\\nTrue a = -1\\n\"\n ],\n [\n \"def root_newton(a: float, k: float, iters=1000) -> float:\\n def f(x):\\n return x**k - a\\n\\n def dx(x):\\n return k * x**(k - 1)\\n\\n assert k > 0, 'Negative `k` values are not allowed'\\n \\n x = 1\\n for _ in range(iters):\\n x = x - f(x) / dx(x)\\n\\n return x\\n\",\n \"_____no_output_____\"\n ],\n [\n \"test(root_newton)\",\n \"All 1000 tests have passed!\\n\"\n ],\n [\n \"printable_test(1350, 12, root_newton)\\nprint('\\\\n')\\nprintable_test(-1, 1, root_newton)\",\n \"Our result: 1350^(1/12) ~ 1.8233126596\\nTrue result: 1.8233126596\\n\\nApprox a ~ 1350.0000000000\\nTrue a = 1350\\n\\n\\nOur result: -1^(1/1) ~ -1.0000000000\\nTrue result: -1.0000000000\\n\\nApprox a ~ -1.0000000000\\nTrue a = -1\\n\"\n ]\n ],\n [\n [\n \"## Дан многочлен P степени не больше 5 и отрезок [L, R]\",\n \"_____no_output_____\"\n ],\n [\n \"Локализовать корни: $ P(L_i) \\\\cdot P(R_i) <0 $ \\n\\nИ найти на каждом таком отрезке корни\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from typing import List\\nimport numpy as np\\n\\nclass Polynom:\\n def __init__(self, coefs: List[float]):\\n # self.coefs = [a0, a1, a2, ...., an]\\n self.coefs = coefs\\n\\n def __str__(self):\\n if not self.coefs:\\n return ''\\n descr = str(self.coefs[0])\\n for i, coef in enumerate(self.coefs[1:]):\\n sign = '+' if coef > 0 else '-'\\n descr += f' {sign} {abs(coef)} * x^{i + 1}'\\n return descr\\n \\n def __repr__(self):\\n return self.__str__()\\n\\n def value_at(self, x: float) -> float:\\n res = 0\\n for i, coef in enumerate(self.coefs):\\n res += x**i * coef\\n return res\\n \\n def dx(self):\\n if not self.coefs or len(self.coefs) == 1:\\n return Polynom([0])\\n return Polynom([(i + 1) * coef for i, coef in enumerate(self.coefs[1:])])\\n \\n def is_root(self, x: float, prc=1e-4) -> bool:\\n return abs(0 - self.value_at(x)) < prc\\n \\n def root_segments(self, l: float, r: float, min_seg_len=1e-2) -> List[List[float]]:\\n segs = []\\n prev_end, cur_end = l, l + min_seg_len\\n\\n while cur_end < r:\\n if self.value_at(prev_end) * self.value_at(cur_end) < 0:\\n segs.append([prev_end, cur_end])\\n prev_end = cur_end\\n\\n if self.value_at(cur_end) == 0:\\n move = min_seg_len / 10\\n segs.append([prev_end, cur_end + move])\\n prev_end = cur_end + move\\n\\n cur_end += min_seg_len\\n\\n return segs\\n \\n def find_single_root(self, l: float, r: float, iters=1000) -> float:\\n for _ in range(iters):\\n m = l + (r - l) / 2\\n if self.value_at(l) * self.value_at(m) < 0:\\n r = m\\n else:\\n l = m\\n return l + (r - l) / 2\\n \\n def find_roots(self, l: float, r: float) -> List[float]:\\n roots = []\\n segs = self.root_segments(l, r)\\n for seg_l, seg_r in segs:\\n roots.append(self.find_single_root(seg_l, seg_r))\\n \\n return roots\\n \\n def check_roots(self, roots: List[float]) -> bool:\\n return np.all([self.is_root(x) for x in roots])\\n \\n def find_min(self, l: float, r: float) -> float:\\n assert self.coefs, 'Polynom must contain at least one coef'\\n\\n if len(self.coefs) == 1:\\n return self.coefs[0]\\n\\n pts = [l, *self.dx().find_roots(l, r), r]\\n return min(self.value_at(pt) for pt in pts)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"# x^3 + 97.93 x^2 - 229.209 x + 132.304\\n# (x - 1.1) * (x - 1.2) * (x + 100.23)\\np = Polynom([132.304, -229.209, 97.93, 1])\\np.find_roots(-1000, 1000)\",\n \"_____no_output_____\"\n ],\n [\n \"p.find_min(-10, 10)\",\n \"_____no_output_____\"\n ],\n [\n \"Polynom([1, 2, 1]).find_roots(-1000, 1000)\",\n \"_____no_output_____\"\n ],\n [\n \"Polynom([1, -2]).find_roots(-1000, 1000)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"## Найти минимум функции $ e^{ax} + e^{-bx} + c(x - d)^2$\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from numpy import exp\\nfrom typing import Tuple\\n\\nclass ExpMinFinder:\\n def __init__(self, a: float, b: float, c: float, d: float):\\n if a <= 0 or b <= 0 or c <= 0:\\n raise ValueError(\\\"Parameters must be non-negative\\\")\\n self.a = a\\n self.b = b\\n self.c = c\\n self.d = d\\n \\n def f(self, x) -> float:\\n return exp(self.a * x) + exp(-self.b * x) + self.c * (x - self.d)**2\\n \\n def dx(self, x) -> float:\\n return self.a * exp(self.a * x) - self.b * exp(-self.b * x) + 2 * self.c * (x - self.d)\\n \\n def ddx(self, x) -> float:\\n return self.a**2 * exp(self.a * x) + self.b**2 * exp(-self.b * x) + 2 * self.c\\n \\n def min_bisection(self, iters=1000) -> Tuple[float, float]:\\n l, r = -100, 100\\n for _ in range(iters):\\n m = l + (r - l) / 2\\n if self.dx(m) * self.dx(l) < 0:\\n r = m\\n else:\\n l = m\\n\\n min_at = l + (r - l) / 2\\n return min_at, self.f(min_at)\\n \\n def min_newton(self, iters=1000) -> Tuple[float, float]:\\n x = 1\\n for _ in range(iters):\\n x = x - self.dx(x) / self.ddx(x)\\n\\n return x, self.f(x)\\n\\n def min_ternary(self, iters=1000) -> Tuple[float, float]:\\n l, r = -100, 100\\n \\n for _ in range(iters):\\n m1 = l + (r - l) / 3\\n m2 = r - (r - l) / 3\\n if self.f(m1) >= self.f(m2):\\n l = m1\\n else:\\n r = m2\\n min_at = l + (r - l) / 2\\n\\n return min_at, self.f(min_at)\\n\",\n \"_____no_output_____\"\n ],\n [\n \"def test_exp():\\n rng = np.random.default_rng(12345)\\n test_len = 100\\n a_ = rng.integers(low=1, high=10, size=test_len)\\n b_ = rng.integers(low=1, high=10, size=test_len)\\n c_ = rng.integers(low=1, high=10, size=test_len)\\n d_ = rng.integers(low=1, high=10, size=test_len)\\n \\n \\n for a, b, c, d in zip(a_, b_, c_, d_):\\n m = ExpMinFinder(a, b, c, d)\\n \\n assert abs(m.min_bisection()[1] - m.min_newton()[1]) < 1e-3, \\\\\\n f'Results: {m.min_bisection():.3f} {m.min_newton():.3f}, values: {a, b, c, d}'\\n assert abs(m.min_newton()[1] - m.min_ternary()[1]) < 1e-3, \\\\\\n f'Results: {m.min_newton():.3f} {m.min_ternary():.3f}, values: {a, b, c, d}'\\n \\n print(f'All {test_len} tests have passed')\\n \\ntest_exp()\",\n \"/var/folders/n7/k2zyw7490b97hfmqkg39c6080000gn/T/ipykernel_79092/1883086689.py:17: RuntimeWarning: overflow encountered in exp\\n return self.a * exp(self.a * x) - self.b * exp(-self.b * x) + 2 * self.c * (x - self.d)\\n\"\n ],\n [\n \"m = ExpMinFinder(1, 2, 3, 4)\",\n \"_____no_output_____\"\n ],\n [\n \"m.min_bisection()\",\n \"_____no_output_____\"\n ],\n [\n \"m.min_newton()\",\n \"_____no_output_____\"\n ],\n [\n \"m.min_ternary()\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code","code","code","code","code","code"],["markdown","markdown"],["code","code","code","code","code"],["markdown"],["code","code","code","code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458824,"cells":{"hexsha":{"kind":"string","value":"e7e3abdc05f59875a261798f22b4c691650af603"},"size":{"kind":"number","value":95049,"string":"95,049"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"NotebookFinalTrainTest.ipynb"},"max_stars_repo_name":{"kind":"string","value":"Riferji/Cajamar-2019"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e1aecc793287ee556ee39c380966cf97627b91b5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"NotebookFinalTrainTest.ipynb"},"max_issues_repo_name":{"kind":"string","value":"Riferji/Cajamar-2019"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e1aecc793287ee556ee39c380966cf97627b91b5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"NotebookFinalTrainTest.ipynb"},"max_forks_repo_name":{"kind":"string","value":"Riferji/Cajamar-2019"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e1aecc793287ee556ee39c380966cf97627b91b5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":75.4956314535,"string":"75.495631"},"max_line_length":{"kind":"number","value":34958,"string":"34,958"},"alphanum_fraction":{"kind":"number","value":0.7185977759,"string":"0.718598"},"cells":{"kind":"list like","value":[[["En este Nootebock se realiza la limpieza del conjunto train, de tal manera, que al terminar la pipeline ya se puede emplear dicho conjunto para el entrenamiento de modelos.\n\nSe expondrá en un pequeño comentario en la parte superior por la razon que se realiza el cambio\n\nPara una mejor descripción se puede consultar *PreprocesadoTrainRaw.ipynb* donde se comentan un poco mejor los pasos realizados.","_____no_output_____"]],[["import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.decomposition import PCA\n\ndf = pd.read_table('Modelar_UH2019.txt', sep = '|', dtype={'HY_cod_postal':str})","_____no_output_____"],["df = pd.read_table('Modelar_UH2019.txt', sep = '|', dtype={'HY_cod_postal':str})\n# Tenemos varios Nans en HY_provincias, por lo que creamos la siguiente función que nos ayudará a imputarlos con\n# ayuda del código postal\ndef ArreglarProvincias(df):\n # Diccionario de los códigos postales. 'xxddd' --> xx es el código asociado a la provincia\n diccionario_postal = {'02':'Albacete','03':'Alicante','04':'Almería','01':'Álava','33':'Asturias',\n '05':'Avila','06':'Badajoz','07':'Baleares', '08':'Barcelona','48':'Bizkaia',\n '09':'Burgos','10':'Cáceres','11':'Cádiz','39':'Cantabria','12':'Castellón',\n '13':'Ciudad Real','14':'Córdoba','15':'A Coruña','16':'Cuenca','20':'Gipuzkoa',\n '17':'Gerona','18':'Granada','19':'Guadalajara','21':'Huelva','22':'Huesca',\n '23':'Jaén','24':'León','25':'Lérida','27':'Lugo','28':'Madrid','29':'Málaga',\n '30':'Murcia','31':'Navarra','32':'Ourense','34':'Palencia','35':'Las Palmas',\n '36':'Pontevedra','26':'La Rioja','37':'Salamanca','38':'Tenerife','40':'Segovia',\n '41':'Sevilla','42':'Soria','43':'Tarragona','44':'Teruel','45':'Toledo','46':'Valencia',\n '47':'Valladolid','49':'Zamora','50':'Zaragoza','51':'Ceuta','52':'Melilla'}\n \n # Obtenemos los códigos postales que nos faltan\n codigos_postales = df.loc[df.HY_provincia.isnull()].HY_cod_postal\n \n # Recorremos la pareja index, value\n for idx, cod in zip(codigos_postales.index, codigos_postales):\n # Del cod solo nos interesan los dos primeros valores para la provincia.\n df.loc[idx,'HY_provincia'] = diccionario_postal[cod[:2]]\n \n # Devolvemos el df de las provincias\n return df\n\n# Obtenemos nuestro df con las provincias imputadas\ndf = ArreglarProvincias(df)\n\n\n########## Metros ##############\n# Volvemos Nans los valores de 0m^2 o inferior --> Los 0 provocan errores en una nueva variable de €/m2\ndf.loc[df['HY_metros_utiles'] <= 0,'HY_metros_utiles'] = np.nan\ndf.loc[df['HY_metros_totales'] <= 0,'HY_metros_totales'] = np.nan\n\n# Obtenemos las posiciones de los valores faltantes een los metros útiles\nposiciones_nans = df['HY_metros_totales'].isnull()\n\n# Rellenamos los Nans con los metros totales\ndf.loc[posiciones_nans,'HY_metros_totales'] = df.loc[posiciones_nans,'HY_metros_utiles']\n\n# Obtenemos las posiciones de los valores faltantes een los metros útiles\nposiciones_nans = df['HY_metros_utiles'].isnull()\n\n# Rellenamos los Nans con los metros totales\ndf.loc[posiciones_nans,'HY_metros_utiles'] = df.loc[posiciones_nans,'HY_metros_totales']\n\n# Si continuamos teniendo Nans\nif df[['HY_metros_utiles', 'HY_metros_totales']].isnull().sum().sum()>0: # Hay 2 .sum para sumarlo todo\n # Agrupamos por HY_tipo\n group_tipo = df[['HY_tipo', 'HY_metros_utiles', 'HY_metros_totales']].dropna().groupby('HY_tipo').mean()\n # Cuales son los indices de los registros que tienen nans\n index_nans = df.index[df['HY_metros_utiles'].isnull()]\n for i in index_nans:\n tipo = df.loc[i, 'HY_tipo']\n df.loc[i, ['HY_metros_utiles', 'HY_metros_totales']] = group_tipo.loc[tipo]\n \n# Eliminamos los outliers\n# Definimos la cota a partir de la cual son outliers\ncota = df['HY_metros_utiles'].mean()+3*df['HY_metros_utiles'].std()\n# Y nos quedamos con todos aquellos que no la superan\ndf = df[df['HY_metros_utiles'] <= cota]\n# Idem para metros totales\n# Definimos la cota a partir de la cual son outliers\ncota = df['HY_metros_totales'].mean()+3*df['HY_metros_totales'].std()\n# Y nos quedamos con todos aquellos que no la superan\ndf = df[df['HY_metros_totales'] <= cota]\n\n# Por último, eliminamos los registros que presenten una diferencia excesiva de metros\ndif_metros = np.abs(df.HY_metros_utiles - df.HY_metros_totales)\ndf = df[dif_metros <= 500]\n\n########## Precios ############\n# Creamos una nueva variable que sea ¿Existe precio anterior?--> Si/No\ndf['PV_precio_anterior'] = df['HY_precio_anterior'].isnull()\n# Y modificamos precio anterior para que tenga los valores del precio actual como anterior\ndf.loc[df['HY_precio_anterior'].isnull(),'HY_precio_anterior'] = df.loc[df['HY_precio_anterior'].isnull(),'HY_precio']\n# Eliminamos también los precios irrisorios (Todos aquellos precios inferiores a 100€)\nv = df[['HY_precio', 'HY_precio_anterior']].apply(lambda x: x[0] <= 100 and x[1] <= 100, axis = 1)\ndf = df[v == False]\n\n\n\n######## Descripción y distribución #########\n# Creamos 2 nuevas variables con la longitud del texto expuesto (Nan = 0)\n# Igualamos los NaN a carácteres vacíos\ndf.loc[df['HY_descripcion'].isnull(),'HY_descripcion'] = ''\ndf.loc[df['HY_distribucion'].isnull(),'HY_distribucion'] = ''\n# Calculamos su longitud\ndf['PV_longitud_descripcion'] = df['HY_descripcion'].apply(lambda x: len(x))\ndf['PV_longitud_distribucion'] = df['HY_distribucion'].apply(lambda x: len(x))\n\n####### Cantidad de imágenes #########\n# Añadimos una nueva columna que es la cantidad de imágenes que tiene asociado el piso\n# El df de información de las imágenes tiene 3 columnas: id, posicion_foto, carácteres_aleatorios\ndf_imagenes = pd.read_csv('df_info_imagenes.csv', sep = '|',encoding = 'utf-8')\n# Realizamos un count de los ids de las imagenes (Y nos quedamos con el valor de la \n# variable Posiciones (Al ser un count, nos es indiferente la variable seleccionada))\ndf_count_imagenes = df_imagenes.groupby('HY_id').count()['Posiciones']\n# Definimos la función que asocia a cada id su número de imágenes\ndef AñadirCantidadImagenes(x):\n try:\n return df_count_imagenes.loc[x]\n except:\n return 0\n# Creamos la variable\ndf['PV_cantidad_imagenes'] = df['HY_id'].apply(lambda x: AñadirCantidadImagenes(x))\n\n\n######### Imputación de las variables IDEA #########\n# En el notebook ImputacionNans.ipynb se explica en mayor profundidad las funciones definidas. Por el momento, \n# para imputar los valores Nans de las variables IDEA realizamos lo siguiente:\n# -1. Hacemos la media de las variables que no son Nan por CP\n# -2. Imputamos por la media del CP\n# -3. Repetimos para aquellos codigos postales que son todo Nans con la media por provincias (Sin contar los imputados)\n# -4. Imputamos los Nans que faltan por la media general de todo (Sin contar los imputados)\nvar_list = [\n ['IDEA_pc_1960', 'IDEA_pc_1960_69', 'IDEA_pc_1970_79', 'IDEA_pc_1980_89','IDEA_pc_1990_99', 'IDEA_pc_2000_10'],\n ['IDEA_pc_comercio','IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros','IDEA_pc_residencial', 'IDEA_pc_trast_parking'],\n ['IDEA_ind_tienda', 'IDEA_ind_turismo', 'IDEA_ind_alimentacion'],\n ['IDEA_ind_riqueza'],\n ['IDEA_rent_alquiler'],\n ['IDEA_ind_elasticidad', 'IDEA_ind_liquidez'],\n ['IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential', 'IDEA_stock_sale_residential'],\n ['IDEA_demand_sale_residential'],\n ['IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential', 'IDEA_stock_rent_residential'],\n ['IDEA_demand_rent_residential'] \n]\n# Función que imputa Nans por la media de CP o Provincias (La versión de ImputacionNans.ipynb imprime el número\n# de valores faltantes después de la imputación)\ndef ImputarNans_cp(df, vars_imput, var): \n '''\n df --> Nuestro dataframe a modificar\n vars_imput --> Variables que queremos imputar.\n var --> Variable por la que queremos realizar la agrupación (HY_cod_postal ó HY_provincia)\n '''\n # Obtenemos nuestros df de grupos\n group_cp = df[[var]+vars_imput].dropna().groupby(var).mean()\n \n # Obtenemos los CP que son Nans\n codigos_nans = df.loc[df[vars_imput[0]].isnull(), var] # Valdría cualquiera de las 6 variables.\n \n # Como sabemos que códigos podremos completar y cuales no, solo utilizaremos los que se pueden completar\n cods = np.intersect1d(codigos_nans.unique(),group_cp.index)\n # Cuales son los índices de los Nans\n index_nan = df.index[df[vars_imput[0]].isnull()]\n for cod in cods:\n # Explicación del indexado: De todos los códigos que coinciden con el nuestro nos quedamos con los que tienen índice\n # nan, y para poder acceder a df, necesitamos los índices de Nan que cumplen lo del código.\n i = index_nan[(df[var] == cod)[index_nan]]\n df.loc[i, vars_imput] = group_cp.loc[cod].values\n \n # Devolvemos los dataframes\n return df, group_cp\n# Bucle que va variable por variable imputando los valores\nfor vars_group in var_list:\n #print('*'*50)\n #print('Variables:', vars_group)\n #print('-'*10+' CP '+'-'*10)\n df, group_cp = ImputarNans_cp(df, vars_group, var = 'HY_cod_postal')\n #print('-'*10+' Provincias '+'-'*10)\n df, group_provincia = ImputarNans_cp(df, vars_group, var = 'HY_provincia')\n \n # Si aún quedan Nans los ponemos a todos con la media de todo\n registros_faltantes = df[vars_group[0]].isnull().sum()\n if registros_faltantes>0:\n #print('-'*30)\n df.loc[df[vars_group[0]].isnull(), vars_group] = group_provincia.mean(axis = 0).values\n #print('Se han imputado {} registros por la media de todo'.format(registros_faltantes))\n # Guardamos los datos en la carpeta DF_grupos ya que tenemos que imputar en test por estos mismos valores.\n df.to_csv('./DF_grupos/df_filled_{}.csv'.format(vars_group[0]), sep = '|', encoding='utf-8', index = False)\n group_cp.to_csv('./DF_grupos/group_cp_{}.csv'.format(vars_group[0]), sep = '|', encoding='utf-8')\n group_provincia.to_csv('./DF_grupos/group_prov_{}.csv'.format(vars_group[0]), sep = '|', encoding='utf-8')\n\n####### Indice elasticidad ##########\n# Creamos una nueva variable que redondea el indice de elasticidad al entero más cercano (La variable toma 1,2,3,4,5)\ndf['PV_ind_elasticidad'] = np.round(df['IDEA_ind_elasticidad'])\n\n###### Antigüedad zona #########\n# Definimos la variable de antigüedad de la zona dependiendo del porcentaje de pisos construidos en la zona\n# Primero tomaremos las variables [IDEA_pc_1960,IDEA_pc_1960_69,IDEA_pc_1970_79,IDEA_pc_1980_89,\n# IDEA_pc_1990_99,IDEA_pc_2000_10] y las transformaremos en solo 3. Y luego nos quedaremos \n# con el máximo de esas tres para determinar el estado de la zona.\ndf['Viejos'] = df[['IDEA_pc_1960', 'IDEA_pc_1960_69']].sum(axis = 1)\ndf['Medios'] = df[['IDEA_pc_1970_79', 'IDEA_pc_1980_89']].sum(axis = 1)\ndf['Nuevos'] = df[['IDEA_pc_1990_99', 'IDEA_pc_2000_10']].sum(axis = 1)\ndf['PV_clase_piso'] = df[['Viejos','Medios','Nuevos']].idxmax(axis = 1)\n\n# Añadimos una nueva variable que es si la longitud de la descripción es nula, va de 0 a 1000 carácteres, ó supera los 1000\ndf['PV_longitud_descripcion2'] = pd.cut(df['PV_longitud_descripcion'], bins = [-1,0,1000, np.inf], labels=['Ninguna', 'Media', 'Larga'], include_lowest=False)\n\n# Precio de euro el metro\ndf['PV_precio_metro'] = df.HY_precio/df.HY_metros_totales\n\n# Cambiamos Provincias por 'Castellón','Murcia','Almería','Valencia','Otros'\ndef estructurar_provincias(x):\n '''\n Funcion que asocia a x (Nombre de provincia) su clase\n '''\n # Lista de clases que nos queremos quedar\n if x in ['Castellón','Murcia','Almería','Valencia']:\n return x\n else:\n return 'Otros'\ndf['PV_provincia'] = df.HY_provincia.apply(lambda x: estructurar_provincias(x))\n\n# Una nueva que es si el inmueble presenta alguna distribución\ndf.loc[df['PV_longitud_distribucion'] > 0,'PV_longitud_distribucion'] = 1\n\n# Cambiamos certificado energetico a Si/No (1/0)\ndf['PV_cert_energ'] = df['HY_cert_energ'].apply(lambda x: np.sum(x != 'No'))\n\n# Cambiamos las categorías de HY_tipo a solo 3: [Piso, Garaje, Otros]\ndef CategorizarHY_tipo(dato):\n if dato in ['Piso', 'Garaje']:\n return dato\n else:\n return 'Otros'\ndf['PV_tipo'] = df['HY_tipo'].apply(CategorizarHY_tipo)\n\n# Cambiamos la variable Garaje a Tiene/No tiene (1/0)\ndf.loc[df['HY_num_garajes']>1,'HY_num_garajes'] = 1\n\n# Cambiamos baños por 0, 1, +1 (No tiene, tiene 1, tiene mas de 1)\ndf['PV_num_banos'] = pd.cut(df['HY_num_banos'], [-1,0,1,np.inf], labels = [0,1,'+1'])\n\n# Cambiamos Num terrazas a Si/No (1/0)\ndf.loc[df['HY_num_terrazas']>1, 'HY_num_terrazas'] = 1\n\n\n# Definimos las variables a eliminar para definir nuestro conjunto X\ndrop_vars = ['HY_id', 'HY_cod_postal', 'HY_provincia', 'HY_descripcion',\n 'HY_distribucion', 'HY_tipo', 'HY_antiguedad','HY_num_banos', 'HY_cert_energ',\n 'HY_num_garajes', 'IDEA_pc_1960', 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad', 'IDEA_ind_elasticidad',\n 'Viejos', 'Medios','Nuevos']\n# Explicación:\n# + 'HY_id', 'HY_cod_postal' --> Demasiadas categorías\n# + 'HY_provincia' --> Ya tenemos PV_provincia que las agrupa\n# + 'HY_descripcion','HY_distribucion' --> Tenemos sus longitudes\n# + 'HY_tipo' --> Ya hemos creado PV_tipo\n# + 'HY_cert_energ','HY_num_garajes'--> Ya tenemos las PV asociadas (valores con 0 1)\n# + 'IDEA_pc_1960' --> Está duplicada\n# + 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad' --> Demasiados Nans\n# + 'IDEA_ind_elasticidad' --> Tenemos la variable equivalente en PV\n# + 'Viejos', 'Medios','Nuevos' --> Ya tenemos PV_clase_piso\n# + 'TARGET' --> Por motivos obvios no la queremos en X\nX = df.copy().drop(drop_vars+['TARGET'],axis = 1)\ny = df.TARGET.copy()\n\n# Eliminamos los outliers de las siguientes variables\ncont_vars = ['HY_metros_utiles', 'HY_metros_totales','GA_page_views', 'GA_mean_bounce',\n 'GA_exit_rate', 'GA_quincena_ini', 'GA_quincena_ult','PV_longitud_descripcion',\n 'PV_longitud_distribucion', 'PV_cantidad_imagenes',\n 'PV_ind_elasticidad', 'PV_precio_metro']\nfor var in cont_vars:\n cota = X[var].mean()+3*X[var].std()\n y = y[X[var]<=cota]\n X = X[X[var]<=cota]\n# Y eliminamos los Outliers de nuestra variable respuesta\nX = X[y <= y.mean()+3*y.std()]\ny = y[y <= y.mean()+3*y.std()]\n# Realizamos el logaritmo de nuestra variable respuesta (Nota: Sumamos 1 para evitar log(0))\ny = np.log(y+1)\n\n\n# Creamos las variables Dummy para las categóricas\ndummy_vars = ['PV_provincia','PV_longitud_descripcion2',\n 'PV_clase_piso','PV_tipo','PV_num_banos']\n# Unimos nuestro conjunto con el de dummies\nX = X.join(pd.get_dummies(X[dummy_vars]))\n# Eliminamos las variables que ya son Dummies\nX = X.drop(dummy_vars, axis=1)\n\n\n############# PCA ####################\n# Realizamos una PCA con las variables IDEA (Nota: soolo tomamos 1 componente porque nos explica el 99.95% de la varianza)\nidea_vars_price = [\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential']\n\npca_prices = PCA(n_components=1)\nidea_pca_price = pca_prices.fit_transform(X[idea_vars_price])\nX['PV_idea_pca_price'] = (idea_pca_price-idea_pca_price.min())/(idea_pca_price.max()-idea_pca_price.min())\n# Realizamos una PCA con las variables IDEA (Nota: soolo tomamos 1 componente porque nos explica el 78% de la varianza)\nidea_vars_pc = [\n 'IDEA_pc_comercio',\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez']\n\npca_pc = PCA(n_components=1)\nidea_pca_pc = pca_pc.fit_transform(X[idea_vars_pc])\nX['PV_idea_pca_pc'] = (idea_pca_pc-idea_pca_pc.min())/(idea_pca_pc.max()-idea_pca_pc.min())\n\n# Nos quedamos con la información PCA de nuestras PV \npca_PV = PCA(n_components=3)\nPV_pca = pca_PV.fit_transform(X[['PV_cert_energ',\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\n 'PV_num_banos_+1']])\n\nX['PV_pca1'] = PV_pca[:, 0]\nX['PV_pca2'] = PV_pca[:, 1]\nX['PV_pca3'] = PV_pca[:, 2]\n\n# Eliminamos los posibles outliers creados\npca_vars = ['PV_idea_pca_price', 'PV_idea_pca_pc','PV_pca1', 'PV_pca2', 'PV_pca3']\nfor var in pca_vars:\n cota = X[var].mean()+3*X[var].std()\n y = y[X[var]<=cota]\n X = X[X[var]<=cota]\n\nX = X.drop([\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential',\n 'IDEA_pc_comercio',\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez', 'PV_cert_energ',\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\n 'PV_num_banos_+1'], axis = 1)","_____no_output_____"]],[["# Entrenamiento de modelos\n\nHemos entrenado una gran cantidad de modelos, incluso podríamos llegar a decir que más de 1000 (a base de bucles y funciones) para ver cual es el que más se ajusta a nuestro dataset. Y para no tenerlos nadando entre los cientos de pruebas que hemos reaalizado en los notebooks *Modelos2\\_TestingsModelos.ipynb*, *Modelos3\\_featureSelection.ipynb*, *Modelos4\\_ForwardAndEnsemble.ipynb*","_____no_output_____"]],[["import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn import linear_model\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn import neighbors\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.neural_network import MLPRegressor\n\nimport xgboost as xgb\nfrom xgboost.sklearn import XGBRegressor\n\n# Métrica\nfrom sklearn.metrics import median_absolute_error\n\n\nmodels = {\n 'DecisionTreeRegressor10':tree.DecisionTreeRegressor(max_depth = 10),\n 'RandomForestRegressor20':RandomForestRegressor(max_depth=5, n_estimators = 20, random_state=0),\n 'RandomForestRegressor50':RandomForestRegressor(max_depth=10, n_estimators = 50, random_state=0),\n 'RandomForestRegressor100':RandomForestRegressor(max_depth=10, n_estimators = 100, random_state=0),\n 'ExtraTreesRegressor10':ExtraTreesRegressor(n_estimators=10,random_state=0),\n 'ExtraTreesRegressor100':ExtraTreesRegressor(n_estimators=100, random_state=0),\n 'ExtraTreesRegressor150':ExtraTreesRegressor(n_estimators=150, random_state=0),\n 'GradientBoostingRegressor30_md5':GradientBoostingRegressor(n_estimators=30, learning_rate=0.1, max_depth=5, random_state=0, loss='ls'),\n 'GradientBoostingRegressor50_md5':GradientBoostingRegressor(n_estimators=50, learning_rate=0.1, max_depth=5, random_state=0, loss='ls'),\n 'XGB25':XGBRegressor(max_depth = 10, n_estimators=25, random_state=7),\n 'XGB46':XGBRegressor(max_depth = 10, n_estimators=46, random_state=7),\n 'XGB60':XGBRegressor(max_depth = 10, n_estimators=60, random_state=7),\n 'XGB100':XGBRegressor(max_depth = 10, n_estimators=100, random_state=7)\n }\n\ndef EntrenarModelos(X, y, models, drop_vars):\n '''\n X, y --> Nuestra data\n models --> Diccionario de modelos a entrenar\n drop_vars --> Variables que no queremos en nuestro modelo\n '''\n X_train, X_test, y_train, y_test = train_test_split(X.drop(drop_vars, axis = 1), y, test_size=0.3, random_state=7)\n \n y_test_predict = {}\n errores = {}\n # Definimos el diccionario donde vamos guardando el mejor modelo con su error asociado\n minimo = {'':np.inf}\n for name, model in models.items():\n #try:\n model = model.fit(X_train, y_train)\n y_test_predict[name] = model.predict(X_test)\n errores[name] = median_absolute_error(np.exp(y_test)-1, np.exp(y_test_predict[name])-1)\n \n print(name,': ', errores[name], sep = '')\n \n # Actualizamos el diccionario\n if list(minimo.values())[0] > errores[name]:\n minimo = {name:errores[name]}\n return minimo\n\nEntrenarModelos(X, y, models, [])","DecisionTreeRegressor10: 21.55431393653663\nRandomForestRegressor20: 18.580995303598044\nRandomForestRegressor50: 19.072373408609195\nRandomForestRegressor100: 18.861664050362826\nExtraTreesRegressor10: 19.80307387148771\nExtraTreesRegressor100: 18.588761921652768\nExtraTreesRegressor150: 18.57115721270116\nGradientBoostingRegressor30_md5: 19.084825961682014\nGradientBoostingRegressor50_md5: 18.973164773235773\nXGB25: 18.734364471435548\nXGB46: 18.948498382568367\nXGB60: 19.172454528808608\nXGB100: 19.46763259887696\n"]],[["Para la optimización de los parámetros implementamos un grid search manual con el que vamos variando los parámetros mediante bucles for. Nosotros encontramos el óptimo en *n_estimators= 30, reg_lambda* = 0.9, *subsample = 0.6*, *colsample_bytree = 0.7*","_____no_output_____"]],[["models = {'BestXGBoost' : XGBRegressor(max_depth = 10, \n n_estimators= 30, \n reg_lambda = 0.9,\n subsample = 0.6,\n colsample_bytree = 0.7,\n objective = 'reg:linear',\n random_state=7)\n }","_____no_output_____"],["EntrenarModelos(X, y, models, [])","BestXGBoost: 17.369460296630855\n"],["# Variable que indica si queremos iniciar la búsqueda\nQuererBuscar = False\n\nif QuererBuscar == True:\n models = {}\n for i1 in [30, 40, 46, 50, 60]:\n for i2 in [0.7, 0.8, 0.9, 1]:\n for i3 in [0.5, 0.6, 0.7, 0.8, 0.9, 1]:\n for i4 in [0.5, 0.6, 0.7, 0.8, 0.9, 1]:\n models['XGB_{}_{}_{}_{}'.format(i1, i2, i3, i4)] = XGBRegressor(max_depth = 10, \n n_estimators= i1, \n reg_lambda = i2,\n subsample = i3,\n colsample_bytree = i4,\n objective = 'reg:linear',\n random_state=7)\n print(len(models))\n \nelse:\n models = {'BestXGBoost' : XGBRegressor(max_depth = 10, \n n_estimators= 30, \n reg_lambda = 0.9,\n subsample = 0.6,\n colsample_bytree = 0.7,\n objective = 'reg:linear',\n random_state=7)\n }\n \nEntrenarModelos(X, y, models, [])","BestXGBoost: 17.369460296630855\n"]],[["Una vez definido el mejor modelo vamos a realizar una búsqueda de las mejores variables. Y para ello definimos una función forward que nos vaya añadiendo variables según su error.","_____no_output_____"]],[["def Entrenar(X,y,model):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)\n \n model = model.fit(X_train, y_train)\n \n y_pred = model.predict(X_test)\n error = median_absolute_error(np.exp(y_test)-1, np.exp(y_pred)-1)\n \n return error\n\ndef EntrenarForward(X, y, model, ini_vars):\n '''\n X,y --> Nuestra data\n model --> un modelo\n ini_vars --> variables con las que comenzamos\n '''\n # Variable que indica si hemos terminado\n fin = False\n \n # Variables con las que estamos trabajando\n current_vars = ini_vars\n all_vars = X.columns\n possible_vars = np.setdiff1d(all_vars, current_vars)\n \n while not fin and len(possible_vars) > 0: # Lo que antes pase\n possible_vars = np.setdiff1d(all_vars, current_vars)\n \n if len(current_vars) == 0:\n # Si no tenemos variables, cuestro error es inf\n best_error = np.inf\n else:\n base_error = Entrenar(X[current_vars], y, model)\n best_error = base_error\n \n best_var = ''\n for var in possible_vars:\n var_error = Entrenar(X[current_vars+[var]],y,model)\n \n if var_error < best_error:\n best_error = var_error\n best_var = var\n \n print('Best var: {} --> {:.4f}'.format(best_var, best_error))\n # Si tenemos una best_var \n if len(best_var) > 0:\n current_vars += [best_var]\n else: \n fin = True\n \n print('Best vars:', current_vars)\n print('Best error:', best_error)\n \n return best_error","_____no_output_____"],["EntrenarForward(X, y, XGBRegressor(max_depth = 10, \n n_estimators= 30, \n reg_lambda = 0.9,\n subsample = 0.6,\n colsample_bytree = 0.7,\n objective = 'reg:linear',\n random_state=7), \n [])","Best var: GA_page_views --> 18.9004\nBest var: GA_mean_bounce --> 18.6871\nBest var: IDEA_pc_1970_79 --> 18.4164\nBest var: PV_pca2 --> 18.1765\nBest var: PV_longitud_descripcion --> 18.1751\nBest var: --> 18.1751\nBest vars: ['GA_page_views', 'GA_mean_bounce', 'IDEA_pc_1970_79', 'PV_pca2', 'PV_longitud_descripcion']\nBest error: 18.17510498046875\n"],["EntrenarForward(X, y, XGBRegressor(max_depth = 10, \n n_estimators= 30, \n reg_lambda = 0.9,\n subsample = 0.6,\n colsample_bytree = 0.7,\n objective = 'reg:linear',\n random_state=7), \n ['HY_precio'])","Best var: GA_page_views --> 18.8182\nBest var: PV_pca2 --> 18.6006\nBest var: IDEA_pc_1960_69 --> 18.4076\nBest var: GA_mean_bounce --> 18.2948\nBest var: GA_exit_rate --> 18.1165\nBest var: PV_longitud_descripcion --> 18.1131\nBest var: IDEA_pc_2000_10 --> 18.0981\nBest var: IDEA_pc_1990_99 --> 17.9477\nBest var: PV_pca3 --> 17.8531\nBest var: --> 17.8531\nBest vars: ['HY_precio', 'GA_page_views', 'PV_pca2', 'IDEA_pc_1960_69', 'GA_mean_bounce', 'GA_exit_rate', 'PV_longitud_descripcion', 'IDEA_pc_2000_10', 'IDEA_pc_1990_99', 'PV_pca3']\nBest error: 17.85312286376954\n"]],[["Observemos las feature importances de nuestro mejor árbol ya que no mejoramos con el forward.","_____no_output_____"]],[["X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)\n \nxgb_model = XGBRegressor(max_depth = 10, \n n_estimators= 30, \n reg_lambda = 0.9,\n subsample = 0.6,\n colsample_bytree = 0.7,\n objective = 'reg:linear',\n random_state=7).fit(X_train, y_train)\ny_test_predict = xgb_model.predict(X_test)\nerror = median_absolute_error(np.exp(y_test)-1, np.exp(y_test_predict)-1)\n \nprint('BestXGBoost: ', error, sep = '')","BestXGBoost: 17.369460296630855\n"],["v = xgb_model.feature_importances_\nplt.figure(figsize=(8,6))\nplt.bar(range(len(v)), v)\nplt.title('Feature Importances')\nplt.xticks(range(len(X_train.columns)), list(X_train.columns), rotation = 90)\nplt.show()","_____no_output_____"],["# Ordenamos las variables de menor a mayor\nfeatures_ordered, colnames_ordered = zip(*sorted(zip(xgb_model.feature_importances_, X_train.columns)))\n# No tenemos en cuenta las 10 peores variables\nEntrenarModelos(X, y, models, list(colnames_ordered[10:]))","BestXGBoost: 20.791393280029297\n"]],[["Por lo que el mejor modelo es el primer XGBoost entrenado","_____no_output_____"],["# Conjunto de Test\n\nRealizamos las mismas transforaciones para test","_____no_output_____"]],[["df = pd.read_table('Estimar_UH2019.txt', sep = '|', dtype={'HY_cod_postal':str})\n# Tenemos varios Nans en HY_provincias, por lo que creamos la siguiente función que nos ayudará a imputarlos con\n# ayuda del código postal\ndef ArreglarProvincias(df):\n # Diccionario de los códigos postales. 'xxddd' --> xx es el código asociado a la provincia\n diccionario_postal = {'02':'Albacete','03':'Alicante','04':'Almería','01':'Álava','33':'Asturias',\n '05':'Avila','06':'Badajoz','07':'Baleares', '08':'Barcelona','48':'Bizkaia',\n '09':'Burgos','10':'Cáceres','11':'Cádiz','39':'Cantabria','12':'Castellón',\n '13':'Ciudad Real','14':'Córdoba','15':'A Coruña','16':'Cuenca','20':'Gipuzkoa',\n '17':'Gerona','18':'Granada','19':'Guadalajara','21':'Huelva','22':'Huesca',\n '23':'Jaén','24':'León','25':'Lérida','27':'Lugo','28':'Madrid','29':'Málaga',\n '30':'Murcia','31':'Navarra','32':'Ourense','34':'Palencia','35':'Las Palmas',\n '36':'Pontevedra','26':'La Rioja','37':'Salamanca','38':'Tenerife','40':'Segovia',\n '41':'Sevilla','42':'Soria','43':'Tarragona','44':'Teruel','45':'Toledo','46':'Valencia',\n '47':'Valladolid','49':'Zamora','50':'Zaragoza','51':'Ceuta','52':'Melilla'}\n \n # Obtenemos los códigos postales que nos faltan\n codigos_postales = df.loc[df.HY_provincia.isnull()].HY_cod_postal\n \n # Recorremos la pareja index, value\n for idx, cod in zip(codigos_postales.index, codigos_postales):\n # Del cod solo nos interesan los dos primeros valores para la provincia.\n df.loc[idx,'HY_provincia'] = diccionario_postal[cod[:2]]\n \n # Devolvemos el df de las provincias\n return df\n\n# Obtenemos nuestro df con las provincias imputadas\ndf = ArreglarProvincias(df)\n\n\n########## Metros ##############\n# Volvemos Nans los valores de 0m^2 o inferior --> Los 0 provocan errores en una nueva variable de €/m2\ndf.loc[df['HY_metros_utiles'] <= 0,'HY_metros_utiles'] = np.nan\ndf.loc[df['HY_metros_totales'] <= 0,'HY_metros_totales'] = np.nan\n\n# Obtenemos las posiciones de los valores faltantes en los metros útiles\nposiciones_nans = df['HY_metros_totales'].isnull()\n# Rellenamos los Nans con los metros totales\ndf.loc[posiciones_nans,'HY_metros_totales'] = df.loc[posiciones_nans,'HY_metros_utiles']\n\n# Obtenemos las posiciones de los valores faltantes een los metros útiles\nposiciones_nans = df['HY_metros_utiles'].isnull()\n# Rellenamos los Nans con los metros totales\ndf.loc[posiciones_nans,'HY_metros_utiles'] = df.loc[posiciones_nans,'HY_metros_totales']\n\n# Si continuamos teniendo Nans\nif df[['HY_metros_utiles', 'HY_metros_totales']].isnull().sum().sum()>0: # Hay 2 .sum para sumarlo todo\n # Cuales son los indices de los registros que tienen nans\n index_nans = df.index[df['HY_metros_utiles'].isnull()]\n for i in index_nans:\n tipo = df.loc[i, 'HY_tipo']\n df.loc[i, ['HY_metros_utiles', 'HY_metros_totales']] = group_tipo.loc[tipo] # Recuperamos group_tipo\n \n\n########## Precios ############\n# Creamos una nueva variable que sea ¿Existe precio anterior?--> Si/No\ndf['PV_precio_anterior'] = df['HY_precio_anterior'].isnull()\n# Y modificamos precio anterior para que tenga los valores del precio actual como anterior\ndf.loc[df['HY_precio_anterior'].isnull(),'HY_precio_anterior'] = df.loc[df['HY_precio_anterior'].isnull(),'HY_precio']\n\n\n######## Descripción y distribución #########\n# Creamos 2 nuevas variables con la longitud del texto expuesto (Nan = 0)\n# Igualamos los NaN a carácteres vacíos\ndf.loc[df['HY_descripcion'].isnull(),'HY_descripcion'] = ''\ndf.loc[df['HY_distribucion'].isnull(),'HY_distribucion'] = ''\n# Calculamos su longitud\ndf['PV_longitud_descripcion'] = df['HY_descripcion'].apply(lambda x: len(x))\ndf['PV_longitud_distribucion'] = df['HY_distribucion'].apply(lambda x: len(x))\n\n####### Cantidad de imágenes #########\n# Añadimos una nueva columna que es la cantidad de imágenes que tiene asociado el piso\n# El df de información de las imágenes tiene 3 columnas: id, posicion_foto, carácteres_aleatorios\ndf_imagenes = pd.read_csv('df_info_imagenes.csv', sep = '|',encoding = 'utf-8')\n# Realizamos un count de los ids de las imagenes (Y nos quedamos con el valor de la \n# variable Posiciones (Al ser un count, nos es indiferente la variable seleccionada))\ndf_count_imagenes = df_imagenes.groupby('HY_id').count()['Posiciones']\n# Definimos la función que asocia a cada id su número de imágenes\ndef AñadirCantidadImagenes(x):\n try:\n return df_count_imagenes.loc[x]\n except:\n return 0\n# Creamos la variable\ndf['PV_cantidad_imagenes'] = df['HY_id'].apply(lambda x: AñadirCantidadImagenes(x))\n\n\n######### Imputación de las variables IDEA #########\n# En el notebook ImputacionNans.ipynb se explica en mayor profundidad las funciones definidas. Por el momento, \n# para imputar los valores Nans de las variables IDEA realizamos lo siguiente:\n# -1. Hacemos la media de las variables que no son Nan por CP\n# -2. Imputamos por la media del CP\n# -3. Repetimos para aquellos codigos postales que son todo Nans con la media por provincias (Sin contar los imputados)\n# -4. Imputamos los Nans que faltan por la media general de todo (Sin contar los imputados)\nvar_list = [\n ['IDEA_pc_1960', 'IDEA_pc_1960_69', 'IDEA_pc_1970_79', 'IDEA_pc_1980_89','IDEA_pc_1990_99', 'IDEA_pc_2000_10'],\n ['IDEA_pc_comercio','IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros','IDEA_pc_residencial', 'IDEA_pc_trast_parking'],\n ['IDEA_ind_tienda', 'IDEA_ind_turismo', 'IDEA_ind_alimentacion'],\n ['IDEA_ind_riqueza'],\n ['IDEA_rent_alquiler'],\n ['IDEA_ind_elasticidad', 'IDEA_ind_liquidez'],\n ['IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential', 'IDEA_stock_sale_residential'],\n ['IDEA_demand_sale_residential'],\n ['IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential', 'IDEA_stock_rent_residential'],\n ['IDEA_demand_rent_residential'] \n]\n\n# Función para arregla los codigos postales mal leidos (Son leidos como enteros).\ndef ArreglarCP(cp):\n if len(cp)==4:\n return '0'+cp\n else:\n return cp\n \ndef ImputarNans_test(df, vars_imput, var):\n '''\n df --> Nuestro dataframe a modificar\n vars_imput --> Variables que queremos imputar.\n var --> Variable por la que queremos realizar la agrupación (HY_cod_postal ó HY_provincia)\n '''\n # Obtenemos nuestros df definidos durante el Train\n if var == 'HY_cod_postal':\n # Hay un error en la escritura del CP, ya que se guardó como int\n group_cp = pd.read_csv('./DF_grupos/group_cp_{}.csv'.format(vars_imput[0]), sep = '|', encoding='utf-8', dtype={'HY_cod_postal': str})\n group_cp['HY_cod_postal'] = group_cp['HY_cod_postal'].apply(lambda x: ArreglarCP(x))\n group_cp.index = group_cp['HY_cod_postal']\n group_cp = group_cp.drop('HY_cod_postal',axis = 1)\n elif var == 'HY_provincia':\n group_cp = pd.read_csv('./DF_grupos/group_prov_{}.csv'.format(vars_imput[0]), sep = '|', encoding='utf-8', index_col='HY_provincia')\n else:\n print('Solo se acepta HY_cod_postal ó HY_provincia como valor de \"var\"')\n \n # Obtenemos los CP que son Nans\n codigos_nans = df.loc[df[vars_imput[0]].isnull(), var] # Valdría cualquiera de las variables.\n \n # Como sabemos que códigos podremos completar y cuales no, solo utilizaremos los que se pueden completar\n cods = np.intersect1d(codigos_nans.unique(),group_cp.index)\n # Cuales son los índices de los Nans\n index_nan = df.index[df[vars_imput[0]].isnull()]\n for cod in cods:\n # Explicación del indexado: De todos los códigos que coinciden con el nuestro nos quedamos con los que tienen índice\n # nan, y para poder acceder a df, necesitamos los índices de Nan que cumplen lo del código.\n i = index_nan[(df[var] == cod)[index_nan]]\n df.loc[i, vars_imput] = group_cp.loc[cod].values\n \n # Si ya hemos terminado de imputar y aún nos quedan Nans imputamos por la media de todo\n if var == 'HY_provincia' and df[vars_imput[0]].isnull().sum()>0:\n df.loc[df[vars_imput[0]].isnull(), vars_imput] = group_cp.mean(axis = 0).values\n \n # Devolvemos el dataframe imputado\n return df\n\n# Como en el caso anterior, vamos conjunto por conjunto\nfor vars_group in var_list:\n df = ImputarNans_test(df, vars_group, var = 'HY_cod_postal')\n df = ImputarNans_test(df, vars_group, var = 'HY_provincia')\n\n####### Indice elasticidad ##########\n# Creamos una nueva variable que redondea el indice de elasticidad al entero más cercano (La variable toma 1,2,3,4,5)\ndf['PV_ind_elasticidad'] = np.round(df['IDEA_ind_elasticidad'])\n\n###### Antigüedad zona #########\n# Definimos la variable de antigüedad de la zona dependiendo del porcentaje de pisos construidos en la zona\n# Primero tomaremos las variables [IDEA_pc_1960,IDEA_pc_1960_69,IDEA_pc_1970_79,IDEA_pc_1980_89,\n# IDEA_pc_1990_99,IDEA_pc_2000_10] y las transformaremos en solo 3. Y luego nos quedaremos \n# con el máximo de esas tres para determinar el estado de la zona.\ndf['Viejos'] = df[['IDEA_pc_1960', 'IDEA_pc_1960_69']].sum(axis = 1)\ndf['Medios'] = df[['IDEA_pc_1970_79', 'IDEA_pc_1980_89']].sum(axis = 1)\ndf['Nuevos'] = df[['IDEA_pc_1990_99', 'IDEA_pc_2000_10']].sum(axis = 1)\ndf['PV_clase_piso'] = df[['Viejos','Medios','Nuevos']].idxmax(axis = 1)\n\n# Añadimos una nueva variable que es si la longitud de la descripción es nula, va de 0 a 1000 carácteres, ó supera los 1000\ndf['PV_longitud_descripcion2'] = pd.cut(df['PV_longitud_descripcion'], bins = [-1,0,1000, np.inf], labels=['Ninguna', 'Media', 'Larga'], include_lowest=False)\n\n# Precio de euro el metro\ndf['PV_precio_metro'] = df.HY_precio/df.HY_metros_totales\n\n# Cambiamos Provincias por 'Castellón','Murcia','Almería','Valencia','Otros'\ndef estructurar_provincias(x):\n '''\n Funcion que asocia a x (Nombre de provincia) su clase\n '''\n # Lista de clases que nos queremos quedar\n if x in ['Castellón','Murcia','Almería','Valencia']:\n return x\n else:\n return 'Otros'\ndf['PV_provincia'] = df.HY_provincia.apply(lambda x: estructurar_provincias(x))\n\n# Una nueva que es si el inmueble presenta alguna distribución\ndf.loc[df['PV_longitud_distribucion'] > 0,'PV_longitud_distribucion'] = 1\n\n# Cambiamos certificado energetico a Si/No (1/0)\ndf['PV_cert_energ'] = df['HY_cert_energ'].apply(lambda x: np.sum(x != 'No'))\n\n# Cambiamos las categorías de HY_tipo a solo 3: [Piso, Garaje, Otros]\ndef CategorizarHY_tipo(dato):\n if dato in ['Piso', 'Garaje']:\n return dato\n else:\n return 'Otros'\ndf['PV_tipo'] = df['HY_tipo'].apply(CategorizarHY_tipo)\n\n# Cambiamos la variable Garaje a Tiene/No tiene (1/0)\ndf.loc[df['HY_num_garajes']>1,'HY_num_garajes'] = 1\n\n# Cambiamos baños por 0, 1, +1 (No tiene, tiene 1, tiene mas de 1)\ndf['PV_num_banos'] = pd.cut(df['HY_num_banos'], [-1,0,1,np.inf], labels = [0,1,'+1'])\n\n# Cambiamos Num terrazas a Si/No (1/0)\ndf.loc[df['HY_num_terrazas']>1, 'HY_num_terrazas'] = 1\n\n\n# Definimos las variables a eliminar para definir nuestro conjunto X\ndrop_vars = ['HY_id', 'HY_cod_postal', 'HY_provincia', 'HY_descripcion',\n 'HY_distribucion', 'HY_tipo', 'HY_antiguedad','HY_num_banos', 'HY_cert_energ',\n 'HY_num_garajes', 'IDEA_pc_1960', 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad', 'IDEA_ind_elasticidad',\n 'Viejos', 'Medios','Nuevos']\n# Explicación:\n# + 'HY_id', 'HY_cod_postal' --> Demasiadas categorías\n# + 'HY_provincia' --> Ya tenemos PV_provincia que las agrupa\n# + 'HY_descripcion','HY_distribucion' --> Tenemos sus longitudes\n# + 'HY_tipo' --> Ya hemos creado PV_tipo\n# + 'HY_cert_energ','HY_num_garajes'--> Ya tenemos las PV asociadas (valores con 0 1)\n# + 'IDEA_pc_1960' --> Está duplicada\n# + 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad' --> Demasiados Nans\n# + 'IDEA_ind_elasticidad' --> Tenemos la variable equivalente en PV\n# + 'Viejos', 'Medios','Nuevos' --> Ya tenemos PV_clase_piso\n\nX_real_test = df.copy().drop(drop_vars, axis = 1)\n\n# Definimos las variables como en Train\ncont_vars = ['HY_metros_utiles', 'HY_metros_totales','GA_page_views', 'GA_mean_bounce',\n 'GA_exit_rate', 'GA_quincena_ini', 'GA_quincena_ult','PV_longitud_descripcion',\n 'PV_longitud_distribucion', 'PV_cantidad_imagenes',\n 'PV_ind_elasticidad', 'PV_precio_metro']\n\n# Creamos las variables Dummy para las categóricas\ndummy_vars = ['PV_provincia','PV_longitud_descripcion2',\n 'PV_clase_piso','PV_tipo','PV_num_banos']\n# Unimos nuestro conjunto con el de dummies\nX_real_test = X_real_test.join(pd.get_dummies(X_real_test[dummy_vars]))\n# Eliminamos las variables que ya son Dummies\nX_real_test = X_real_test.drop(dummy_vars, axis=1)\n\n\n############# PCA ####################\n# Realizamos una PCA con las variables IDEA\nidea_vars_price = [\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential']\nidea_pca_price = pca_prices.transform(X_real_test[idea_vars_price])\nX_real_test['PV_idea_pca_price'] = (idea_pca_price-idea_pca_price.min())/(idea_pca_price.max()-idea_pca_price.min())\n# Realizamos una PCA con las variables IDEA \nidea_vars_pc = [\n 'IDEA_pc_comercio',\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez']\n\nidea_pca_pc = pca_pc.transform(X_real_test[idea_vars_pc])\nX_real_test['PV_idea_pca_pc'] = (idea_pca_pc-idea_pca_pc.min())/(idea_pca_pc.max()-idea_pca_pc.min())\n# Nos quedamos con la información PCA de nuestras PV \nPV_pca = pca_PV.transform(X_real_test[['PV_cert_energ',\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\n 'PV_num_banos_+1']])\n\nX_real_test['PV_pca1'] = PV_pca[:, 0]\nX_real_test['PV_pca2'] = PV_pca[:, 1]\nX_real_test['PV_pca3'] = PV_pca[:, 2]\n\n# Eliminamos las variables que ya no queremos\nX_real_test = X_real_test.drop([\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential',\n 'IDEA_pc_comercio',\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez', 'PV_cert_energ',\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\n 'PV_num_banos_+1'], axis = 1)","_____no_output_____"],["X_real_test.columns","_____no_output_____"],["# Realizamos la predicción\ny_final_pred = xgb_model.predict(X_real_test)\n# Deshacemos el cambio\nultra_final_pred = np.exp(y_final_pred)-1\n# Guardamos el resultado\n\n# Definimos el df de solución aprovechando que tenemos el HY_id almacenado en df\ndf_solucion = pd.DataFrame({'HY_id':df['HY_id'], 'TM_Est':ultra_final_pred})\ndf_solucion.head(7)\n# Guardamos la solución\ndf_solucion.to_csv('machine predictor_UH2019.txt', \n header=True, index=False, sep='|', encoding='utf-8')","_____no_output_____"]]],"string":"[\n [\n [\n \"En este Nootebock se realiza la limpieza del conjunto train, de tal manera, que al terminar la pipeline ya se puede emplear dicho conjunto para el entrenamiento de modelos.\\n\\nSe expondrá en un pequeño comentario en la parte superior por la razon que se realiza el cambio\\n\\nPara una mejor descripción se puede consultar *PreprocesadoTrainRaw.ipynb* donde se comentan un poco mejor los pasos realizados.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\\nimport numpy as np\\nfrom matplotlib import pyplot as plt\\n\\nfrom sklearn.decomposition import PCA\\n\\ndf = pd.read_table('Modelar_UH2019.txt', sep = '|', dtype={'HY_cod_postal':str})\",\n \"_____no_output_____\"\n ],\n [\n \"df = pd.read_table('Modelar_UH2019.txt', sep = '|', dtype={'HY_cod_postal':str})\\n# Tenemos varios Nans en HY_provincias, por lo que creamos la siguiente función que nos ayudará a imputarlos con\\n# ayuda del código postal\\ndef ArreglarProvincias(df):\\n # Diccionario de los códigos postales. 'xxddd' --> xx es el código asociado a la provincia\\n diccionario_postal = {'02':'Albacete','03':'Alicante','04':'Almería','01':'Álava','33':'Asturias',\\n '05':'Avila','06':'Badajoz','07':'Baleares', '08':'Barcelona','48':'Bizkaia',\\n '09':'Burgos','10':'Cáceres','11':'Cádiz','39':'Cantabria','12':'Castellón',\\n '13':'Ciudad Real','14':'Córdoba','15':'A Coruña','16':'Cuenca','20':'Gipuzkoa',\\n '17':'Gerona','18':'Granada','19':'Guadalajara','21':'Huelva','22':'Huesca',\\n '23':'Jaén','24':'León','25':'Lérida','27':'Lugo','28':'Madrid','29':'Málaga',\\n '30':'Murcia','31':'Navarra','32':'Ourense','34':'Palencia','35':'Las Palmas',\\n '36':'Pontevedra','26':'La Rioja','37':'Salamanca','38':'Tenerife','40':'Segovia',\\n '41':'Sevilla','42':'Soria','43':'Tarragona','44':'Teruel','45':'Toledo','46':'Valencia',\\n '47':'Valladolid','49':'Zamora','50':'Zaragoza','51':'Ceuta','52':'Melilla'}\\n \\n # Obtenemos los códigos postales que nos faltan\\n codigos_postales = df.loc[df.HY_provincia.isnull()].HY_cod_postal\\n \\n # Recorremos la pareja index, value\\n for idx, cod in zip(codigos_postales.index, codigos_postales):\\n # Del cod solo nos interesan los dos primeros valores para la provincia.\\n df.loc[idx,'HY_provincia'] = diccionario_postal[cod[:2]]\\n \\n # Devolvemos el df de las provincias\\n return df\\n\\n# Obtenemos nuestro df con las provincias imputadas\\ndf = ArreglarProvincias(df)\\n\\n\\n########## Metros ##############\\n# Volvemos Nans los valores de 0m^2 o inferior --> Los 0 provocan errores en una nueva variable de €/m2\\ndf.loc[df['HY_metros_utiles'] <= 0,'HY_metros_utiles'] = np.nan\\ndf.loc[df['HY_metros_totales'] <= 0,'HY_metros_totales'] = np.nan\\n\\n# Obtenemos las posiciones de los valores faltantes een los metros útiles\\nposiciones_nans = df['HY_metros_totales'].isnull()\\n\\n# Rellenamos los Nans con los metros totales\\ndf.loc[posiciones_nans,'HY_metros_totales'] = df.loc[posiciones_nans,'HY_metros_utiles']\\n\\n# Obtenemos las posiciones de los valores faltantes een los metros útiles\\nposiciones_nans = df['HY_metros_utiles'].isnull()\\n\\n# Rellenamos los Nans con los metros totales\\ndf.loc[posiciones_nans,'HY_metros_utiles'] = df.loc[posiciones_nans,'HY_metros_totales']\\n\\n# Si continuamos teniendo Nans\\nif df[['HY_metros_utiles', 'HY_metros_totales']].isnull().sum().sum()>0: # Hay 2 .sum para sumarlo todo\\n # Agrupamos por HY_tipo\\n group_tipo = df[['HY_tipo', 'HY_metros_utiles', 'HY_metros_totales']].dropna().groupby('HY_tipo').mean()\\n # Cuales son los indices de los registros que tienen nans\\n index_nans = df.index[df['HY_metros_utiles'].isnull()]\\n for i in index_nans:\\n tipo = df.loc[i, 'HY_tipo']\\n df.loc[i, ['HY_metros_utiles', 'HY_metros_totales']] = group_tipo.loc[tipo]\\n \\n# Eliminamos los outliers\\n# Definimos la cota a partir de la cual son outliers\\ncota = df['HY_metros_utiles'].mean()+3*df['HY_metros_utiles'].std()\\n# Y nos quedamos con todos aquellos que no la superan\\ndf = df[df['HY_metros_utiles'] <= cota]\\n# Idem para metros totales\\n# Definimos la cota a partir de la cual son outliers\\ncota = df['HY_metros_totales'].mean()+3*df['HY_metros_totales'].std()\\n# Y nos quedamos con todos aquellos que no la superan\\ndf = df[df['HY_metros_totales'] <= cota]\\n\\n# Por último, eliminamos los registros que presenten una diferencia excesiva de metros\\ndif_metros = np.abs(df.HY_metros_utiles - df.HY_metros_totales)\\ndf = df[dif_metros <= 500]\\n\\n########## Precios ############\\n# Creamos una nueva variable que sea ¿Existe precio anterior?--> Si/No\\ndf['PV_precio_anterior'] = df['HY_precio_anterior'].isnull()\\n# Y modificamos precio anterior para que tenga los valores del precio actual como anterior\\ndf.loc[df['HY_precio_anterior'].isnull(),'HY_precio_anterior'] = df.loc[df['HY_precio_anterior'].isnull(),'HY_precio']\\n# Eliminamos también los precios irrisorios (Todos aquellos precios inferiores a 100€)\\nv = df[['HY_precio', 'HY_precio_anterior']].apply(lambda x: x[0] <= 100 and x[1] <= 100, axis = 1)\\ndf = df[v == False]\\n\\n\\n\\n######## Descripción y distribución #########\\n# Creamos 2 nuevas variables con la longitud del texto expuesto (Nan = 0)\\n# Igualamos los NaN a carácteres vacíos\\ndf.loc[df['HY_descripcion'].isnull(),'HY_descripcion'] = ''\\ndf.loc[df['HY_distribucion'].isnull(),'HY_distribucion'] = ''\\n# Calculamos su longitud\\ndf['PV_longitud_descripcion'] = df['HY_descripcion'].apply(lambda x: len(x))\\ndf['PV_longitud_distribucion'] = df['HY_distribucion'].apply(lambda x: len(x))\\n\\n####### Cantidad de imágenes #########\\n# Añadimos una nueva columna que es la cantidad de imágenes que tiene asociado el piso\\n# El df de información de las imágenes tiene 3 columnas: id, posicion_foto, carácteres_aleatorios\\ndf_imagenes = pd.read_csv('df_info_imagenes.csv', sep = '|',encoding = 'utf-8')\\n# Realizamos un count de los ids de las imagenes (Y nos quedamos con el valor de la \\n# variable Posiciones (Al ser un count, nos es indiferente la variable seleccionada))\\ndf_count_imagenes = df_imagenes.groupby('HY_id').count()['Posiciones']\\n# Definimos la función que asocia a cada id su número de imágenes\\ndef AñadirCantidadImagenes(x):\\n try:\\n return df_count_imagenes.loc[x]\\n except:\\n return 0\\n# Creamos la variable\\ndf['PV_cantidad_imagenes'] = df['HY_id'].apply(lambda x: AñadirCantidadImagenes(x))\\n\\n\\n######### Imputación de las variables IDEA #########\\n# En el notebook ImputacionNans.ipynb se explica en mayor profundidad las funciones definidas. Por el momento, \\n# para imputar los valores Nans de las variables IDEA realizamos lo siguiente:\\n# -1. Hacemos la media de las variables que no son Nan por CP\\n# -2. Imputamos por la media del CP\\n# -3. Repetimos para aquellos codigos postales que son todo Nans con la media por provincias (Sin contar los imputados)\\n# -4. Imputamos los Nans que faltan por la media general de todo (Sin contar los imputados)\\nvar_list = [\\n ['IDEA_pc_1960', 'IDEA_pc_1960_69', 'IDEA_pc_1970_79', 'IDEA_pc_1980_89','IDEA_pc_1990_99', 'IDEA_pc_2000_10'],\\n ['IDEA_pc_comercio','IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros','IDEA_pc_residencial', 'IDEA_pc_trast_parking'],\\n ['IDEA_ind_tienda', 'IDEA_ind_turismo', 'IDEA_ind_alimentacion'],\\n ['IDEA_ind_riqueza'],\\n ['IDEA_rent_alquiler'],\\n ['IDEA_ind_elasticidad', 'IDEA_ind_liquidez'],\\n ['IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential', 'IDEA_stock_sale_residential'],\\n ['IDEA_demand_sale_residential'],\\n ['IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential', 'IDEA_stock_rent_residential'],\\n ['IDEA_demand_rent_residential'] \\n]\\n# Función que imputa Nans por la media de CP o Provincias (La versión de ImputacionNans.ipynb imprime el número\\n# de valores faltantes después de la imputación)\\ndef ImputarNans_cp(df, vars_imput, var): \\n '''\\n df --> Nuestro dataframe a modificar\\n vars_imput --> Variables que queremos imputar.\\n var --> Variable por la que queremos realizar la agrupación (HY_cod_postal ó HY_provincia)\\n '''\\n # Obtenemos nuestros df de grupos\\n group_cp = df[[var]+vars_imput].dropna().groupby(var).mean()\\n \\n # Obtenemos los CP que son Nans\\n codigos_nans = df.loc[df[vars_imput[0]].isnull(), var] # Valdría cualquiera de las 6 variables.\\n \\n # Como sabemos que códigos podremos completar y cuales no, solo utilizaremos los que se pueden completar\\n cods = np.intersect1d(codigos_nans.unique(),group_cp.index)\\n # Cuales son los índices de los Nans\\n index_nan = df.index[df[vars_imput[0]].isnull()]\\n for cod in cods:\\n # Explicación del indexado: De todos los códigos que coinciden con el nuestro nos quedamos con los que tienen índice\\n # nan, y para poder acceder a df, necesitamos los índices de Nan que cumplen lo del código.\\n i = index_nan[(df[var] == cod)[index_nan]]\\n df.loc[i, vars_imput] = group_cp.loc[cod].values\\n \\n # Devolvemos los dataframes\\n return df, group_cp\\n# Bucle que va variable por variable imputando los valores\\nfor vars_group in var_list:\\n #print('*'*50)\\n #print('Variables:', vars_group)\\n #print('-'*10+' CP '+'-'*10)\\n df, group_cp = ImputarNans_cp(df, vars_group, var = 'HY_cod_postal')\\n #print('-'*10+' Provincias '+'-'*10)\\n df, group_provincia = ImputarNans_cp(df, vars_group, var = 'HY_provincia')\\n \\n # Si aún quedan Nans los ponemos a todos con la media de todo\\n registros_faltantes = df[vars_group[0]].isnull().sum()\\n if registros_faltantes>0:\\n #print('-'*30)\\n df.loc[df[vars_group[0]].isnull(), vars_group] = group_provincia.mean(axis = 0).values\\n #print('Se han imputado {} registros por la media de todo'.format(registros_faltantes))\\n # Guardamos los datos en la carpeta DF_grupos ya que tenemos que imputar en test por estos mismos valores.\\n df.to_csv('./DF_grupos/df_filled_{}.csv'.format(vars_group[0]), sep = '|', encoding='utf-8', index = False)\\n group_cp.to_csv('./DF_grupos/group_cp_{}.csv'.format(vars_group[0]), sep = '|', encoding='utf-8')\\n group_provincia.to_csv('./DF_grupos/group_prov_{}.csv'.format(vars_group[0]), sep = '|', encoding='utf-8')\\n\\n####### Indice elasticidad ##########\\n# Creamos una nueva variable que redondea el indice de elasticidad al entero más cercano (La variable toma 1,2,3,4,5)\\ndf['PV_ind_elasticidad'] = np.round(df['IDEA_ind_elasticidad'])\\n\\n###### Antigüedad zona #########\\n# Definimos la variable de antigüedad de la zona dependiendo del porcentaje de pisos construidos en la zona\\n# Primero tomaremos las variables [IDEA_pc_1960,IDEA_pc_1960_69,IDEA_pc_1970_79,IDEA_pc_1980_89,\\n# IDEA_pc_1990_99,IDEA_pc_2000_10] y las transformaremos en solo 3. Y luego nos quedaremos \\n# con el máximo de esas tres para determinar el estado de la zona.\\ndf['Viejos'] = df[['IDEA_pc_1960', 'IDEA_pc_1960_69']].sum(axis = 1)\\ndf['Medios'] = df[['IDEA_pc_1970_79', 'IDEA_pc_1980_89']].sum(axis = 1)\\ndf['Nuevos'] = df[['IDEA_pc_1990_99', 'IDEA_pc_2000_10']].sum(axis = 1)\\ndf['PV_clase_piso'] = df[['Viejos','Medios','Nuevos']].idxmax(axis = 1)\\n\\n# Añadimos una nueva variable que es si la longitud de la descripción es nula, va de 0 a 1000 carácteres, ó supera los 1000\\ndf['PV_longitud_descripcion2'] = pd.cut(df['PV_longitud_descripcion'], bins = [-1,0,1000, np.inf], labels=['Ninguna', 'Media', 'Larga'], include_lowest=False)\\n\\n# Precio de euro el metro\\ndf['PV_precio_metro'] = df.HY_precio/df.HY_metros_totales\\n\\n# Cambiamos Provincias por 'Castellón','Murcia','Almería','Valencia','Otros'\\ndef estructurar_provincias(x):\\n '''\\n Funcion que asocia a x (Nombre de provincia) su clase\\n '''\\n # Lista de clases que nos queremos quedar\\n if x in ['Castellón','Murcia','Almería','Valencia']:\\n return x\\n else:\\n return 'Otros'\\ndf['PV_provincia'] = df.HY_provincia.apply(lambda x: estructurar_provincias(x))\\n\\n# Una nueva que es si el inmueble presenta alguna distribución\\ndf.loc[df['PV_longitud_distribucion'] > 0,'PV_longitud_distribucion'] = 1\\n\\n# Cambiamos certificado energetico a Si/No (1/0)\\ndf['PV_cert_energ'] = df['HY_cert_energ'].apply(lambda x: np.sum(x != 'No'))\\n\\n# Cambiamos las categorías de HY_tipo a solo 3: [Piso, Garaje, Otros]\\ndef CategorizarHY_tipo(dato):\\n if dato in ['Piso', 'Garaje']:\\n return dato\\n else:\\n return 'Otros'\\ndf['PV_tipo'] = df['HY_tipo'].apply(CategorizarHY_tipo)\\n\\n# Cambiamos la variable Garaje a Tiene/No tiene (1/0)\\ndf.loc[df['HY_num_garajes']>1,'HY_num_garajes'] = 1\\n\\n# Cambiamos baños por 0, 1, +1 (No tiene, tiene 1, tiene mas de 1)\\ndf['PV_num_banos'] = pd.cut(df['HY_num_banos'], [-1,0,1,np.inf], labels = [0,1,'+1'])\\n\\n# Cambiamos Num terrazas a Si/No (1/0)\\ndf.loc[df['HY_num_terrazas']>1, 'HY_num_terrazas'] = 1\\n\\n\\n# Definimos las variables a eliminar para definir nuestro conjunto X\\ndrop_vars = ['HY_id', 'HY_cod_postal', 'HY_provincia', 'HY_descripcion',\\n 'HY_distribucion', 'HY_tipo', 'HY_antiguedad','HY_num_banos', 'HY_cert_energ',\\n 'HY_num_garajes', 'IDEA_pc_1960', 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad', 'IDEA_ind_elasticidad',\\n 'Viejos', 'Medios','Nuevos']\\n# Explicación:\\n# + 'HY_id', 'HY_cod_postal' --> Demasiadas categorías\\n# + 'HY_provincia' --> Ya tenemos PV_provincia que las agrupa\\n# + 'HY_descripcion','HY_distribucion' --> Tenemos sus longitudes\\n# + 'HY_tipo' --> Ya hemos creado PV_tipo\\n# + 'HY_cert_energ','HY_num_garajes'--> Ya tenemos las PV asociadas (valores con 0 1)\\n# + 'IDEA_pc_1960' --> Está duplicada\\n# + 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad' --> Demasiados Nans\\n# + 'IDEA_ind_elasticidad' --> Tenemos la variable equivalente en PV\\n# + 'Viejos', 'Medios','Nuevos' --> Ya tenemos PV_clase_piso\\n# + 'TARGET' --> Por motivos obvios no la queremos en X\\nX = df.copy().drop(drop_vars+['TARGET'],axis = 1)\\ny = df.TARGET.copy()\\n\\n# Eliminamos los outliers de las siguientes variables\\ncont_vars = ['HY_metros_utiles', 'HY_metros_totales','GA_page_views', 'GA_mean_bounce',\\n 'GA_exit_rate', 'GA_quincena_ini', 'GA_quincena_ult','PV_longitud_descripcion',\\n 'PV_longitud_distribucion', 'PV_cantidad_imagenes',\\n 'PV_ind_elasticidad', 'PV_precio_metro']\\nfor var in cont_vars:\\n cota = X[var].mean()+3*X[var].std()\\n y = y[X[var]<=cota]\\n X = X[X[var]<=cota]\\n# Y eliminamos los Outliers de nuestra variable respuesta\\nX = X[y <= y.mean()+3*y.std()]\\ny = y[y <= y.mean()+3*y.std()]\\n# Realizamos el logaritmo de nuestra variable respuesta (Nota: Sumamos 1 para evitar log(0))\\ny = np.log(y+1)\\n\\n\\n# Creamos las variables Dummy para las categóricas\\ndummy_vars = ['PV_provincia','PV_longitud_descripcion2',\\n 'PV_clase_piso','PV_tipo','PV_num_banos']\\n# Unimos nuestro conjunto con el de dummies\\nX = X.join(pd.get_dummies(X[dummy_vars]))\\n# Eliminamos las variables que ya son Dummies\\nX = X.drop(dummy_vars, axis=1)\\n\\n\\n############# PCA ####################\\n# Realizamos una PCA con las variables IDEA (Nota: soolo tomamos 1 componente porque nos explica el 99.95% de la varianza)\\nidea_vars_price = [\\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential']\\n\\npca_prices = PCA(n_components=1)\\nidea_pca_price = pca_prices.fit_transform(X[idea_vars_price])\\nX['PV_idea_pca_price'] = (idea_pca_price-idea_pca_price.min())/(idea_pca_price.max()-idea_pca_price.min())\\n# Realizamos una PCA con las variables IDEA (Nota: soolo tomamos 1 componente porque nos explica el 78% de la varianza)\\nidea_vars_pc = [\\n 'IDEA_pc_comercio',\\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez']\\n\\npca_pc = PCA(n_components=1)\\nidea_pca_pc = pca_pc.fit_transform(X[idea_vars_pc])\\nX['PV_idea_pca_pc'] = (idea_pca_pc-idea_pca_pc.min())/(idea_pca_pc.max()-idea_pca_pc.min())\\n\\n# Nos quedamos con la información PCA de nuestras PV \\npca_PV = PCA(n_components=3)\\nPV_pca = pca_PV.fit_transform(X[['PV_cert_energ',\\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\\n 'PV_num_banos_+1']])\\n\\nX['PV_pca1'] = PV_pca[:, 0]\\nX['PV_pca2'] = PV_pca[:, 1]\\nX['PV_pca3'] = PV_pca[:, 2]\\n\\n# Eliminamos los posibles outliers creados\\npca_vars = ['PV_idea_pca_price', 'PV_idea_pca_pc','PV_pca1', 'PV_pca2', 'PV_pca3']\\nfor var in pca_vars:\\n cota = X[var].mean()+3*X[var].std()\\n y = y[X[var]<=cota]\\n X = X[X[var]<=cota]\\n\\nX = X.drop([\\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential',\\n 'IDEA_pc_comercio',\\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez', 'PV_cert_energ',\\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\\n 'PV_num_banos_+1'], axis = 1)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Entrenamiento de modelos\\n\\nHemos entrenado una gran cantidad de modelos, incluso podríamos llegar a decir que más de 1000 (a base de bucles y funciones) para ver cual es el que más se ajusta a nuestro dataset. Y para no tenerlos nadando entre los cientos de pruebas que hemos reaalizado en los notebooks *Modelos2\\\\_TestingsModelos.ipynb*, *Modelos3\\\\_featureSelection.ipynb*, *Modelos4\\\\_ForwardAndEnsemble.ipynb*\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import pandas as pd\\nimport numpy as np\\nfrom matplotlib import pyplot as plt\\n\\nfrom sklearn.model_selection import train_test_split\\n\\nfrom sklearn import linear_model\\nfrom sklearn.linear_model import LogisticRegression\\nfrom sklearn import svm\\nfrom sklearn import neighbors\\nfrom sklearn import tree\\nfrom sklearn.ensemble import RandomForestRegressor\\nfrom sklearn.ensemble import ExtraTreesRegressor\\nfrom sklearn.ensemble import GradientBoostingRegressor\\nfrom sklearn.neural_network import MLPRegressor\\n\\nimport xgboost as xgb\\nfrom xgboost.sklearn import XGBRegressor\\n\\n# Métrica\\nfrom sklearn.metrics import median_absolute_error\\n\\n\\nmodels = {\\n 'DecisionTreeRegressor10':tree.DecisionTreeRegressor(max_depth = 10),\\n 'RandomForestRegressor20':RandomForestRegressor(max_depth=5, n_estimators = 20, random_state=0),\\n 'RandomForestRegressor50':RandomForestRegressor(max_depth=10, n_estimators = 50, random_state=0),\\n 'RandomForestRegressor100':RandomForestRegressor(max_depth=10, n_estimators = 100, random_state=0),\\n 'ExtraTreesRegressor10':ExtraTreesRegressor(n_estimators=10,random_state=0),\\n 'ExtraTreesRegressor100':ExtraTreesRegressor(n_estimators=100, random_state=0),\\n 'ExtraTreesRegressor150':ExtraTreesRegressor(n_estimators=150, random_state=0),\\n 'GradientBoostingRegressor30_md5':GradientBoostingRegressor(n_estimators=30, learning_rate=0.1, max_depth=5, random_state=0, loss='ls'),\\n 'GradientBoostingRegressor50_md5':GradientBoostingRegressor(n_estimators=50, learning_rate=0.1, max_depth=5, random_state=0, loss='ls'),\\n 'XGB25':XGBRegressor(max_depth = 10, n_estimators=25, random_state=7),\\n 'XGB46':XGBRegressor(max_depth = 10, n_estimators=46, random_state=7),\\n 'XGB60':XGBRegressor(max_depth = 10, n_estimators=60, random_state=7),\\n 'XGB100':XGBRegressor(max_depth = 10, n_estimators=100, random_state=7)\\n }\\n\\ndef EntrenarModelos(X, y, models, drop_vars):\\n '''\\n X, y --> Nuestra data\\n models --> Diccionario de modelos a entrenar\\n drop_vars --> Variables que no queremos en nuestro modelo\\n '''\\n X_train, X_test, y_train, y_test = train_test_split(X.drop(drop_vars, axis = 1), y, test_size=0.3, random_state=7)\\n \\n y_test_predict = {}\\n errores = {}\\n # Definimos el diccionario donde vamos guardando el mejor modelo con su error asociado\\n minimo = {'':np.inf}\\n for name, model in models.items():\\n #try:\\n model = model.fit(X_train, y_train)\\n y_test_predict[name] = model.predict(X_test)\\n errores[name] = median_absolute_error(np.exp(y_test)-1, np.exp(y_test_predict[name])-1)\\n \\n print(name,': ', errores[name], sep = '')\\n \\n # Actualizamos el diccionario\\n if list(minimo.values())[0] > errores[name]:\\n minimo = {name:errores[name]}\\n return minimo\\n\\nEntrenarModelos(X, y, models, [])\",\n \"DecisionTreeRegressor10: 21.55431393653663\\nRandomForestRegressor20: 18.580995303598044\\nRandomForestRegressor50: 19.072373408609195\\nRandomForestRegressor100: 18.861664050362826\\nExtraTreesRegressor10: 19.80307387148771\\nExtraTreesRegressor100: 18.588761921652768\\nExtraTreesRegressor150: 18.57115721270116\\nGradientBoostingRegressor30_md5: 19.084825961682014\\nGradientBoostingRegressor50_md5: 18.973164773235773\\nXGB25: 18.734364471435548\\nXGB46: 18.948498382568367\\nXGB60: 19.172454528808608\\nXGB100: 19.46763259887696\\n\"\n ]\n ],\n [\n [\n \"Para la optimización de los parámetros implementamos un grid search manual con el que vamos variando los parámetros mediante bucles for. Nosotros encontramos el óptimo en *n_estimators= 30, reg_lambda* = 0.9, *subsample = 0.6*, *colsample_bytree = 0.7*\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"models = {'BestXGBoost' : XGBRegressor(max_depth = 10, \\n n_estimators= 30, \\n reg_lambda = 0.9,\\n subsample = 0.6,\\n colsample_bytree = 0.7,\\n objective = 'reg:linear',\\n random_state=7)\\n }\",\n \"_____no_output_____\"\n ],\n [\n \"EntrenarModelos(X, y, models, [])\",\n \"BestXGBoost: 17.369460296630855\\n\"\n ],\n [\n \"# Variable que indica si queremos iniciar la búsqueda\\nQuererBuscar = False\\n\\nif QuererBuscar == True:\\n models = {}\\n for i1 in [30, 40, 46, 50, 60]:\\n for i2 in [0.7, 0.8, 0.9, 1]:\\n for i3 in [0.5, 0.6, 0.7, 0.8, 0.9, 1]:\\n for i4 in [0.5, 0.6, 0.7, 0.8, 0.9, 1]:\\n models['XGB_{}_{}_{}_{}'.format(i1, i2, i3, i4)] = XGBRegressor(max_depth = 10, \\n n_estimators= i1, \\n reg_lambda = i2,\\n subsample = i3,\\n colsample_bytree = i4,\\n objective = 'reg:linear',\\n random_state=7)\\n print(len(models))\\n \\nelse:\\n models = {'BestXGBoost' : XGBRegressor(max_depth = 10, \\n n_estimators= 30, \\n reg_lambda = 0.9,\\n subsample = 0.6,\\n colsample_bytree = 0.7,\\n objective = 'reg:linear',\\n random_state=7)\\n }\\n \\nEntrenarModelos(X, y, models, [])\",\n \"BestXGBoost: 17.369460296630855\\n\"\n ]\n ],\n [\n [\n \"Una vez definido el mejor modelo vamos a realizar una búsqueda de las mejores variables. Y para ello definimos una función forward que nos vaya añadiendo variables según su error.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"def Entrenar(X,y,model):\\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)\\n \\n model = model.fit(X_train, y_train)\\n \\n y_pred = model.predict(X_test)\\n error = median_absolute_error(np.exp(y_test)-1, np.exp(y_pred)-1)\\n \\n return error\\n\\ndef EntrenarForward(X, y, model, ini_vars):\\n '''\\n X,y --> Nuestra data\\n model --> un modelo\\n ini_vars --> variables con las que comenzamos\\n '''\\n # Variable que indica si hemos terminado\\n fin = False\\n \\n # Variables con las que estamos trabajando\\n current_vars = ini_vars\\n all_vars = X.columns\\n possible_vars = np.setdiff1d(all_vars, current_vars)\\n \\n while not fin and len(possible_vars) > 0: # Lo que antes pase\\n possible_vars = np.setdiff1d(all_vars, current_vars)\\n \\n if len(current_vars) == 0:\\n # Si no tenemos variables, cuestro error es inf\\n best_error = np.inf\\n else:\\n base_error = Entrenar(X[current_vars], y, model)\\n best_error = base_error\\n \\n best_var = ''\\n for var in possible_vars:\\n var_error = Entrenar(X[current_vars+[var]],y,model)\\n \\n if var_error < best_error:\\n best_error = var_error\\n best_var = var\\n \\n print('Best var: {} --> {:.4f}'.format(best_var, best_error))\\n # Si tenemos una best_var \\n if len(best_var) > 0:\\n current_vars += [best_var]\\n else: \\n fin = True\\n \\n print('Best vars:', current_vars)\\n print('Best error:', best_error)\\n \\n return best_error\",\n \"_____no_output_____\"\n ],\n [\n \"EntrenarForward(X, y, XGBRegressor(max_depth = 10, \\n n_estimators= 30, \\n reg_lambda = 0.9,\\n subsample = 0.6,\\n colsample_bytree = 0.7,\\n objective = 'reg:linear',\\n random_state=7), \\n [])\",\n \"Best var: GA_page_views --> 18.9004\\nBest var: GA_mean_bounce --> 18.6871\\nBest var: IDEA_pc_1970_79 --> 18.4164\\nBest var: PV_pca2 --> 18.1765\\nBest var: PV_longitud_descripcion --> 18.1751\\nBest var: --> 18.1751\\nBest vars: ['GA_page_views', 'GA_mean_bounce', 'IDEA_pc_1970_79', 'PV_pca2', 'PV_longitud_descripcion']\\nBest error: 18.17510498046875\\n\"\n ],\n [\n \"EntrenarForward(X, y, XGBRegressor(max_depth = 10, \\n n_estimators= 30, \\n reg_lambda = 0.9,\\n subsample = 0.6,\\n colsample_bytree = 0.7,\\n objective = 'reg:linear',\\n random_state=7), \\n ['HY_precio'])\",\n \"Best var: GA_page_views --> 18.8182\\nBest var: PV_pca2 --> 18.6006\\nBest var: IDEA_pc_1960_69 --> 18.4076\\nBest var: GA_mean_bounce --> 18.2948\\nBest var: GA_exit_rate --> 18.1165\\nBest var: PV_longitud_descripcion --> 18.1131\\nBest var: IDEA_pc_2000_10 --> 18.0981\\nBest var: IDEA_pc_1990_99 --> 17.9477\\nBest var: PV_pca3 --> 17.8531\\nBest var: --> 17.8531\\nBest vars: ['HY_precio', 'GA_page_views', 'PV_pca2', 'IDEA_pc_1960_69', 'GA_mean_bounce', 'GA_exit_rate', 'PV_longitud_descripcion', 'IDEA_pc_2000_10', 'IDEA_pc_1990_99', 'PV_pca3']\\nBest error: 17.85312286376954\\n\"\n ]\n ],\n [\n [\n \"Observemos las feature importances de nuestro mejor árbol ya que no mejoramos con el forward.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)\\n \\nxgb_model = XGBRegressor(max_depth = 10, \\n n_estimators= 30, \\n reg_lambda = 0.9,\\n subsample = 0.6,\\n colsample_bytree = 0.7,\\n objective = 'reg:linear',\\n random_state=7).fit(X_train, y_train)\\ny_test_predict = xgb_model.predict(X_test)\\nerror = median_absolute_error(np.exp(y_test)-1, np.exp(y_test_predict)-1)\\n \\nprint('BestXGBoost: ', error, sep = '')\",\n \"BestXGBoost: 17.369460296630855\\n\"\n ],\n [\n \"v = xgb_model.feature_importances_\\nplt.figure(figsize=(8,6))\\nplt.bar(range(len(v)), v)\\nplt.title('Feature Importances')\\nplt.xticks(range(len(X_train.columns)), list(X_train.columns), rotation = 90)\\nplt.show()\",\n \"_____no_output_____\"\n ],\n [\n \"# Ordenamos las variables de menor a mayor\\nfeatures_ordered, colnames_ordered = zip(*sorted(zip(xgb_model.feature_importances_, X_train.columns)))\\n# No tenemos en cuenta las 10 peores variables\\nEntrenarModelos(X, y, models, list(colnames_ordered[10:]))\",\n \"BestXGBoost: 20.791393280029297\\n\"\n ]\n ],\n [\n [\n \"Por lo que el mejor modelo es el primer XGBoost entrenado\",\n \"_____no_output_____\"\n ],\n [\n \"# Conjunto de Test\\n\\nRealizamos las mismas transforaciones para test\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"df = pd.read_table('Estimar_UH2019.txt', sep = '|', dtype={'HY_cod_postal':str})\\n# Tenemos varios Nans en HY_provincias, por lo que creamos la siguiente función que nos ayudará a imputarlos con\\n# ayuda del código postal\\ndef ArreglarProvincias(df):\\n # Diccionario de los códigos postales. 'xxddd' --> xx es el código asociado a la provincia\\n diccionario_postal = {'02':'Albacete','03':'Alicante','04':'Almería','01':'Álava','33':'Asturias',\\n '05':'Avila','06':'Badajoz','07':'Baleares', '08':'Barcelona','48':'Bizkaia',\\n '09':'Burgos','10':'Cáceres','11':'Cádiz','39':'Cantabria','12':'Castellón',\\n '13':'Ciudad Real','14':'Córdoba','15':'A Coruña','16':'Cuenca','20':'Gipuzkoa',\\n '17':'Gerona','18':'Granada','19':'Guadalajara','21':'Huelva','22':'Huesca',\\n '23':'Jaén','24':'León','25':'Lérida','27':'Lugo','28':'Madrid','29':'Málaga',\\n '30':'Murcia','31':'Navarra','32':'Ourense','34':'Palencia','35':'Las Palmas',\\n '36':'Pontevedra','26':'La Rioja','37':'Salamanca','38':'Tenerife','40':'Segovia',\\n '41':'Sevilla','42':'Soria','43':'Tarragona','44':'Teruel','45':'Toledo','46':'Valencia',\\n '47':'Valladolid','49':'Zamora','50':'Zaragoza','51':'Ceuta','52':'Melilla'}\\n \\n # Obtenemos los códigos postales que nos faltan\\n codigos_postales = df.loc[df.HY_provincia.isnull()].HY_cod_postal\\n \\n # Recorremos la pareja index, value\\n for idx, cod in zip(codigos_postales.index, codigos_postales):\\n # Del cod solo nos interesan los dos primeros valores para la provincia.\\n df.loc[idx,'HY_provincia'] = diccionario_postal[cod[:2]]\\n \\n # Devolvemos el df de las provincias\\n return df\\n\\n# Obtenemos nuestro df con las provincias imputadas\\ndf = ArreglarProvincias(df)\\n\\n\\n########## Metros ##############\\n# Volvemos Nans los valores de 0m^2 o inferior --> Los 0 provocan errores en una nueva variable de €/m2\\ndf.loc[df['HY_metros_utiles'] <= 0,'HY_metros_utiles'] = np.nan\\ndf.loc[df['HY_metros_totales'] <= 0,'HY_metros_totales'] = np.nan\\n\\n# Obtenemos las posiciones de los valores faltantes en los metros útiles\\nposiciones_nans = df['HY_metros_totales'].isnull()\\n# Rellenamos los Nans con los metros totales\\ndf.loc[posiciones_nans,'HY_metros_totales'] = df.loc[posiciones_nans,'HY_metros_utiles']\\n\\n# Obtenemos las posiciones de los valores faltantes een los metros útiles\\nposiciones_nans = df['HY_metros_utiles'].isnull()\\n# Rellenamos los Nans con los metros totales\\ndf.loc[posiciones_nans,'HY_metros_utiles'] = df.loc[posiciones_nans,'HY_metros_totales']\\n\\n# Si continuamos teniendo Nans\\nif df[['HY_metros_utiles', 'HY_metros_totales']].isnull().sum().sum()>0: # Hay 2 .sum para sumarlo todo\\n # Cuales son los indices de los registros que tienen nans\\n index_nans = df.index[df['HY_metros_utiles'].isnull()]\\n for i in index_nans:\\n tipo = df.loc[i, 'HY_tipo']\\n df.loc[i, ['HY_metros_utiles', 'HY_metros_totales']] = group_tipo.loc[tipo] # Recuperamos group_tipo\\n \\n\\n########## Precios ############\\n# Creamos una nueva variable que sea ¿Existe precio anterior?--> Si/No\\ndf['PV_precio_anterior'] = df['HY_precio_anterior'].isnull()\\n# Y modificamos precio anterior para que tenga los valores del precio actual como anterior\\ndf.loc[df['HY_precio_anterior'].isnull(),'HY_precio_anterior'] = df.loc[df['HY_precio_anterior'].isnull(),'HY_precio']\\n\\n\\n######## Descripción y distribución #########\\n# Creamos 2 nuevas variables con la longitud del texto expuesto (Nan = 0)\\n# Igualamos los NaN a carácteres vacíos\\ndf.loc[df['HY_descripcion'].isnull(),'HY_descripcion'] = ''\\ndf.loc[df['HY_distribucion'].isnull(),'HY_distribucion'] = ''\\n# Calculamos su longitud\\ndf['PV_longitud_descripcion'] = df['HY_descripcion'].apply(lambda x: len(x))\\ndf['PV_longitud_distribucion'] = df['HY_distribucion'].apply(lambda x: len(x))\\n\\n####### Cantidad de imágenes #########\\n# Añadimos una nueva columna que es la cantidad de imágenes que tiene asociado el piso\\n# El df de información de las imágenes tiene 3 columnas: id, posicion_foto, carácteres_aleatorios\\ndf_imagenes = pd.read_csv('df_info_imagenes.csv', sep = '|',encoding = 'utf-8')\\n# Realizamos un count de los ids de las imagenes (Y nos quedamos con el valor de la \\n# variable Posiciones (Al ser un count, nos es indiferente la variable seleccionada))\\ndf_count_imagenes = df_imagenes.groupby('HY_id').count()['Posiciones']\\n# Definimos la función que asocia a cada id su número de imágenes\\ndef AñadirCantidadImagenes(x):\\n try:\\n return df_count_imagenes.loc[x]\\n except:\\n return 0\\n# Creamos la variable\\ndf['PV_cantidad_imagenes'] = df['HY_id'].apply(lambda x: AñadirCantidadImagenes(x))\\n\\n\\n######### Imputación de las variables IDEA #########\\n# En el notebook ImputacionNans.ipynb se explica en mayor profundidad las funciones definidas. Por el momento, \\n# para imputar los valores Nans de las variables IDEA realizamos lo siguiente:\\n# -1. Hacemos la media de las variables que no son Nan por CP\\n# -2. Imputamos por la media del CP\\n# -3. Repetimos para aquellos codigos postales que son todo Nans con la media por provincias (Sin contar los imputados)\\n# -4. Imputamos los Nans que faltan por la media general de todo (Sin contar los imputados)\\nvar_list = [\\n ['IDEA_pc_1960', 'IDEA_pc_1960_69', 'IDEA_pc_1970_79', 'IDEA_pc_1980_89','IDEA_pc_1990_99', 'IDEA_pc_2000_10'],\\n ['IDEA_pc_comercio','IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros','IDEA_pc_residencial', 'IDEA_pc_trast_parking'],\\n ['IDEA_ind_tienda', 'IDEA_ind_turismo', 'IDEA_ind_alimentacion'],\\n ['IDEA_ind_riqueza'],\\n ['IDEA_rent_alquiler'],\\n ['IDEA_ind_elasticidad', 'IDEA_ind_liquidez'],\\n ['IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential', 'IDEA_stock_sale_residential'],\\n ['IDEA_demand_sale_residential'],\\n ['IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential', 'IDEA_stock_rent_residential'],\\n ['IDEA_demand_rent_residential'] \\n]\\n\\n# Función para arregla los codigos postales mal leidos (Son leidos como enteros).\\ndef ArreglarCP(cp):\\n if len(cp)==4:\\n return '0'+cp\\n else:\\n return cp\\n \\ndef ImputarNans_test(df, vars_imput, var):\\n '''\\n df --> Nuestro dataframe a modificar\\n vars_imput --> Variables que queremos imputar.\\n var --> Variable por la que queremos realizar la agrupación (HY_cod_postal ó HY_provincia)\\n '''\\n # Obtenemos nuestros df definidos durante el Train\\n if var == 'HY_cod_postal':\\n # Hay un error en la escritura del CP, ya que se guardó como int\\n group_cp = pd.read_csv('./DF_grupos/group_cp_{}.csv'.format(vars_imput[0]), sep = '|', encoding='utf-8', dtype={'HY_cod_postal': str})\\n group_cp['HY_cod_postal'] = group_cp['HY_cod_postal'].apply(lambda x: ArreglarCP(x))\\n group_cp.index = group_cp['HY_cod_postal']\\n group_cp = group_cp.drop('HY_cod_postal',axis = 1)\\n elif var == 'HY_provincia':\\n group_cp = pd.read_csv('./DF_grupos/group_prov_{}.csv'.format(vars_imput[0]), sep = '|', encoding='utf-8', index_col='HY_provincia')\\n else:\\n print('Solo se acepta HY_cod_postal ó HY_provincia como valor de \\\"var\\\"')\\n \\n # Obtenemos los CP que son Nans\\n codigos_nans = df.loc[df[vars_imput[0]].isnull(), var] # Valdría cualquiera de las variables.\\n \\n # Como sabemos que códigos podremos completar y cuales no, solo utilizaremos los que se pueden completar\\n cods = np.intersect1d(codigos_nans.unique(),group_cp.index)\\n # Cuales son los índices de los Nans\\n index_nan = df.index[df[vars_imput[0]].isnull()]\\n for cod in cods:\\n # Explicación del indexado: De todos los códigos que coinciden con el nuestro nos quedamos con los que tienen índice\\n # nan, y para poder acceder a df, necesitamos los índices de Nan que cumplen lo del código.\\n i = index_nan[(df[var] == cod)[index_nan]]\\n df.loc[i, vars_imput] = group_cp.loc[cod].values\\n \\n # Si ya hemos terminado de imputar y aún nos quedan Nans imputamos por la media de todo\\n if var == 'HY_provincia' and df[vars_imput[0]].isnull().sum()>0:\\n df.loc[df[vars_imput[0]].isnull(), vars_imput] = group_cp.mean(axis = 0).values\\n \\n # Devolvemos el dataframe imputado\\n return df\\n\\n# Como en el caso anterior, vamos conjunto por conjunto\\nfor vars_group in var_list:\\n df = ImputarNans_test(df, vars_group, var = 'HY_cod_postal')\\n df = ImputarNans_test(df, vars_group, var = 'HY_provincia')\\n\\n####### Indice elasticidad ##########\\n# Creamos una nueva variable que redondea el indice de elasticidad al entero más cercano (La variable toma 1,2,3,4,5)\\ndf['PV_ind_elasticidad'] = np.round(df['IDEA_ind_elasticidad'])\\n\\n###### Antigüedad zona #########\\n# Definimos la variable de antigüedad de la zona dependiendo del porcentaje de pisos construidos en la zona\\n# Primero tomaremos las variables [IDEA_pc_1960,IDEA_pc_1960_69,IDEA_pc_1970_79,IDEA_pc_1980_89,\\n# IDEA_pc_1990_99,IDEA_pc_2000_10] y las transformaremos en solo 3. Y luego nos quedaremos \\n# con el máximo de esas tres para determinar el estado de la zona.\\ndf['Viejos'] = df[['IDEA_pc_1960', 'IDEA_pc_1960_69']].sum(axis = 1)\\ndf['Medios'] = df[['IDEA_pc_1970_79', 'IDEA_pc_1980_89']].sum(axis = 1)\\ndf['Nuevos'] = df[['IDEA_pc_1990_99', 'IDEA_pc_2000_10']].sum(axis = 1)\\ndf['PV_clase_piso'] = df[['Viejos','Medios','Nuevos']].idxmax(axis = 1)\\n\\n# Añadimos una nueva variable que es si la longitud de la descripción es nula, va de 0 a 1000 carácteres, ó supera los 1000\\ndf['PV_longitud_descripcion2'] = pd.cut(df['PV_longitud_descripcion'], bins = [-1,0,1000, np.inf], labels=['Ninguna', 'Media', 'Larga'], include_lowest=False)\\n\\n# Precio de euro el metro\\ndf['PV_precio_metro'] = df.HY_precio/df.HY_metros_totales\\n\\n# Cambiamos Provincias por 'Castellón','Murcia','Almería','Valencia','Otros'\\ndef estructurar_provincias(x):\\n '''\\n Funcion que asocia a x (Nombre de provincia) su clase\\n '''\\n # Lista de clases que nos queremos quedar\\n if x in ['Castellón','Murcia','Almería','Valencia']:\\n return x\\n else:\\n return 'Otros'\\ndf['PV_provincia'] = df.HY_provincia.apply(lambda x: estructurar_provincias(x))\\n\\n# Una nueva que es si el inmueble presenta alguna distribución\\ndf.loc[df['PV_longitud_distribucion'] > 0,'PV_longitud_distribucion'] = 1\\n\\n# Cambiamos certificado energetico a Si/No (1/0)\\ndf['PV_cert_energ'] = df['HY_cert_energ'].apply(lambda x: np.sum(x != 'No'))\\n\\n# Cambiamos las categorías de HY_tipo a solo 3: [Piso, Garaje, Otros]\\ndef CategorizarHY_tipo(dato):\\n if dato in ['Piso', 'Garaje']:\\n return dato\\n else:\\n return 'Otros'\\ndf['PV_tipo'] = df['HY_tipo'].apply(CategorizarHY_tipo)\\n\\n# Cambiamos la variable Garaje a Tiene/No tiene (1/0)\\ndf.loc[df['HY_num_garajes']>1,'HY_num_garajes'] = 1\\n\\n# Cambiamos baños por 0, 1, +1 (No tiene, tiene 1, tiene mas de 1)\\ndf['PV_num_banos'] = pd.cut(df['HY_num_banos'], [-1,0,1,np.inf], labels = [0,1,'+1'])\\n\\n# Cambiamos Num terrazas a Si/No (1/0)\\ndf.loc[df['HY_num_terrazas']>1, 'HY_num_terrazas'] = 1\\n\\n\\n# Definimos las variables a eliminar para definir nuestro conjunto X\\ndrop_vars = ['HY_id', 'HY_cod_postal', 'HY_provincia', 'HY_descripcion',\\n 'HY_distribucion', 'HY_tipo', 'HY_antiguedad','HY_num_banos', 'HY_cert_energ',\\n 'HY_num_garajes', 'IDEA_pc_1960', 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad', 'IDEA_ind_elasticidad',\\n 'Viejos', 'Medios','Nuevos']\\n# Explicación:\\n# + 'HY_id', 'HY_cod_postal' --> Demasiadas categorías\\n# + 'HY_provincia' --> Ya tenemos PV_provincia que las agrupa\\n# + 'HY_descripcion','HY_distribucion' --> Tenemos sus longitudes\\n# + 'HY_tipo' --> Ya hemos creado PV_tipo\\n# + 'HY_cert_energ','HY_num_garajes'--> Ya tenemos las PV asociadas (valores con 0 1)\\n# + 'IDEA_pc_1960' --> Está duplicada\\n# + 'IDEA_area', 'IDEA_poblacion', 'IDEA_densidad' --> Demasiados Nans\\n# + 'IDEA_ind_elasticidad' --> Tenemos la variable equivalente en PV\\n# + 'Viejos', 'Medios','Nuevos' --> Ya tenemos PV_clase_piso\\n\\nX_real_test = df.copy().drop(drop_vars, axis = 1)\\n\\n# Definimos las variables como en Train\\ncont_vars = ['HY_metros_utiles', 'HY_metros_totales','GA_page_views', 'GA_mean_bounce',\\n 'GA_exit_rate', 'GA_quincena_ini', 'GA_quincena_ult','PV_longitud_descripcion',\\n 'PV_longitud_distribucion', 'PV_cantidad_imagenes',\\n 'PV_ind_elasticidad', 'PV_precio_metro']\\n\\n# Creamos las variables Dummy para las categóricas\\ndummy_vars = ['PV_provincia','PV_longitud_descripcion2',\\n 'PV_clase_piso','PV_tipo','PV_num_banos']\\n# Unimos nuestro conjunto con el de dummies\\nX_real_test = X_real_test.join(pd.get_dummies(X_real_test[dummy_vars]))\\n# Eliminamos las variables que ya son Dummies\\nX_real_test = X_real_test.drop(dummy_vars, axis=1)\\n\\n\\n############# PCA ####################\\n# Realizamos una PCA con las variables IDEA\\nidea_vars_price = [\\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential']\\nidea_pca_price = pca_prices.transform(X_real_test[idea_vars_price])\\nX_real_test['PV_idea_pca_price'] = (idea_pca_price-idea_pca_price.min())/(idea_pca_price.max()-idea_pca_price.min())\\n# Realizamos una PCA con las variables IDEA \\nidea_vars_pc = [\\n 'IDEA_pc_comercio',\\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez']\\n\\nidea_pca_pc = pca_pc.transform(X_real_test[idea_vars_pc])\\nX_real_test['PV_idea_pca_pc'] = (idea_pca_pc-idea_pca_pc.min())/(idea_pca_pc.max()-idea_pca_pc.min())\\n# Nos quedamos con la información PCA de nuestras PV \\nPV_pca = pca_PV.transform(X_real_test[['PV_cert_energ',\\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\\n 'PV_num_banos_+1']])\\n\\nX_real_test['PV_pca1'] = PV_pca[:, 0]\\nX_real_test['PV_pca2'] = PV_pca[:, 1]\\nX_real_test['PV_pca3'] = PV_pca[:, 2]\\n\\n# Eliminamos las variables que ya no queremos\\nX_real_test = X_real_test.drop([\\n 'IDEA_unitprice_sale_residential', 'IDEA_price_sale_residential',\\n 'IDEA_stock_sale_residential', 'IDEA_demand_sale_residential',\\n 'IDEA_unitprice_rent_residential', 'IDEA_price_rent_residential',\\n 'IDEA_stock_rent_residential', 'IDEA_demand_rent_residential',\\n 'IDEA_pc_comercio',\\n 'IDEA_pc_industria', 'IDEA_pc_oficina', 'IDEA_pc_otros',\\n 'IDEA_pc_residencial', 'IDEA_pc_trast_parking', 'IDEA_ind_tienda',\\n 'IDEA_ind_turismo', 'IDEA_ind_alimentacion', 'IDEA_ind_riqueza',\\n 'IDEA_rent_alquiler', 'IDEA_ind_liquidez', 'PV_cert_energ',\\n 'PV_provincia_Almería', 'PV_provincia_Castellón', 'PV_provincia_Murcia',\\n 'PV_provincia_Otros', 'PV_provincia_Valencia',\\n 'PV_longitud_descripcion2_Larga', 'PV_longitud_descripcion2_Media',\\n 'PV_longitud_descripcion2_Ninguna', 'PV_clase_piso_Medios',\\n 'PV_clase_piso_Nuevos', 'PV_clase_piso_Viejos', 'PV_tipo_Garaje',\\n 'PV_tipo_Otros', 'PV_tipo_Piso', 'PV_num_banos_0', 'PV_num_banos_1',\\n 'PV_num_banos_+1'], axis = 1)\",\n \"_____no_output_____\"\n ],\n [\n \"X_real_test.columns\",\n \"_____no_output_____\"\n ],\n [\n \"# Realizamos la predicción\\ny_final_pred = xgb_model.predict(X_real_test)\\n# Deshacemos el cambio\\nultra_final_pred = np.exp(y_final_pred)-1\\n# Guardamos el resultado\\n\\n# Definimos el df de solución aprovechando que tenemos el HY_id almacenado en df\\ndf_solucion = pd.DataFrame({'HY_id':df['HY_id'], 'TM_Est':ultra_final_pred})\\ndf_solucion.head(7)\\n# Guardamos la solución\\ndf_solucion.to_csv('machine predictor_UH2019.txt', \\n header=True, index=False, sep='|', encoding='utf-8')\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code","code"],["markdown"],["code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown"],["code","code","code"],["markdown","markdown"],["code","code","code"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458825,"cells":{"hexsha":{"kind":"string","value":"e7e3b001cfa528bbbf9e43e4edc79cfc94c19d8a"},"size":{"kind":"number","value":4053,"string":"4,053"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"1_BlackScholes_naive.ipynb"},"max_stars_repo_name":{"kind":"string","value":"IntelPython/workshop"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4d17792515f383ceab4fd80a2d6f79834d32fa50"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":13,"string":"13"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-11-16T12:54:57.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-21T21:54:22.000Z"},"max_issues_repo_path":{"kind":"string","value":"1_BlackScholes_naive.ipynb"},"max_issues_repo_name":{"kind":"string","value":"IntelPython/workshop"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4d17792515f383ceab4fd80a2d6f79834d32fa50"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"1_BlackScholes_naive.ipynb"},"max_forks_repo_name":{"kind":"string","value":"IntelPython/workshop"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4d17792515f383ceab4fd80a2d6f79834d32fa50"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2017-11-28T13:29:47.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-07-23T08:06:31.000Z"},"avg_line_length":{"kind":"number","value":21.7903225806,"string":"21.790323"},"max_line_length":{"kind":"number","value":87,"string":"87"},"alphanum_fraction":{"kind":"number","value":0.4448556625,"string":"0.444856"},"cells":{"kind":"list like","value":[[["# Black Scholes Exercise 1: Naive implementation\n\n- Use cProfile and Line Profiler to look for bottlenecks and hotspots in the code","_____no_output_____"]],[["# Boilerplate for the example\n\nimport cProfile\nimport pstats\n\ntry:\n import numpy.random_intel as rnd\nexcept:\n import numpy.random as rnd\n\n# make xrange available in python 3\ntry:\n xrange\nexcept NameError:\n xrange = range\n\nSEED = 7777777\nS0L = 10.0\nS0H = 50.0\nXL = 10.0\nXH = 50.0\nTL = 1.0\nTH = 2.0\nRISK_FREE = 0.1\nVOLATILITY = 0.2\nTEST_ARRAY_LENGTH = 1024\n\n###############################################\n\ndef gen_data(nopt):\n return (\n rnd.uniform(S0L, S0H, nopt),\n rnd.uniform(XL, XH, nopt),\n rnd.uniform(TL, TH, nopt),\n )\n\nnopt=100000\nprice, strike, t = gen_data(nopt)\ncall = [0.0 for i in range(nopt)]\nput = [-1.0 for i in range(nopt)]\nprice=list(price)\nstrike=list(strike)\nt=list(t)","_____no_output_____"]],[["# The Naive Black Scholes algorithm (looped)","_____no_output_____"]],[["from math import log, sqrt, exp, erf\ninvsqrt = lambda x: 1.0/sqrt(x)\n\ndef black_scholes(nopt, price, strike, t, rate, vol, call, put):\n mr = -rate\n sig_sig_two = vol * vol * 2\n \n for i in range(nopt):\n P = float( price [i] )\n S = strike [i]\n T = t [i]\n \n a = log(P / S)\n b = T * mr\n \n z = T * sig_sig_two\n c = 0.25 * z\n y = invsqrt(z)\n \n w1 = (a - b + c) * y\n w2 = (a - b - c) * y\n \n d1 = 0.5 + 0.5 * erf(w1)\n d2 = 0.5 + 0.5 * erf(w2)\n \n Se = exp(b) * S\n \n call [i] = P * d1 - Se * d2\n put [i] = call [i] - P + Se","_____no_output_____"]],[["# Timeit and CProfile Tests\n\nWhat do you notice about the times?\n\n%timeit function(args)\n\n%prun function(args)","_____no_output_____"],["# Line_Profiler tests\n\nHow many times does the function items get called (hits)?","_____no_output_____"]],[["%load_ext line_profiler","_____no_output_____"]],[["%lprun -f function function(args)","_____no_output_____"]]],"string":"[\n [\n [\n \"# Black Scholes Exercise 1: Naive implementation\\n\\n- Use cProfile and Line Profiler to look for bottlenecks and hotspots in the code\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Boilerplate for the example\\n\\nimport cProfile\\nimport pstats\\n\\ntry:\\n import numpy.random_intel as rnd\\nexcept:\\n import numpy.random as rnd\\n\\n# make xrange available in python 3\\ntry:\\n xrange\\nexcept NameError:\\n xrange = range\\n\\nSEED = 7777777\\nS0L = 10.0\\nS0H = 50.0\\nXL = 10.0\\nXH = 50.0\\nTL = 1.0\\nTH = 2.0\\nRISK_FREE = 0.1\\nVOLATILITY = 0.2\\nTEST_ARRAY_LENGTH = 1024\\n\\n###############################################\\n\\ndef gen_data(nopt):\\n return (\\n rnd.uniform(S0L, S0H, nopt),\\n rnd.uniform(XL, XH, nopt),\\n rnd.uniform(TL, TH, nopt),\\n )\\n\\nnopt=100000\\nprice, strike, t = gen_data(nopt)\\ncall = [0.0 for i in range(nopt)]\\nput = [-1.0 for i in range(nopt)]\\nprice=list(price)\\nstrike=list(strike)\\nt=list(t)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# The Naive Black Scholes algorithm (looped)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from math import log, sqrt, exp, erf\\ninvsqrt = lambda x: 1.0/sqrt(x)\\n\\ndef black_scholes(nopt, price, strike, t, rate, vol, call, put):\\n mr = -rate\\n sig_sig_two = vol * vol * 2\\n \\n for i in range(nopt):\\n P = float( price [i] )\\n S = strike [i]\\n T = t [i]\\n \\n a = log(P / S)\\n b = T * mr\\n \\n z = T * sig_sig_two\\n c = 0.25 * z\\n y = invsqrt(z)\\n \\n w1 = (a - b + c) * y\\n w2 = (a - b - c) * y\\n \\n d1 = 0.5 + 0.5 * erf(w1)\\n d2 = 0.5 + 0.5 * erf(w2)\\n \\n Se = exp(b) * S\\n \\n call [i] = P * d1 - Se * d2\\n put [i] = call [i] - P + Se\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Timeit and CProfile Tests\\n\\nWhat do you notice about the times?\\n\\n%timeit function(args)\\n\\n%prun function(args)\",\n \"_____no_output_____\"\n ],\n [\n \"# Line_Profiler tests\\n\\nHow many times does the function items get called (hits)?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%load_ext line_profiler\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"%lprun -f function function(args)\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["markdown"],["code"],["markdown"],["code"],["markdown","markdown"],["code"],["markdown"]],"string":"[\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458826,"cells":{"hexsha":{"kind":"string","value":"e7e3c21af0d75cff286235dfcc4573ae3400dd2c"},"size":{"kind":"number","value":232097,"string":"232,097"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"Visualizations/AvarageDailyErrors.ipynb"},"max_stars_repo_name":{"kind":"string","value":"SGBC/weatherdata"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c04ade1b104ef2d94b471bdd1baebdef4c9e9ce0"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Visualizations/AvarageDailyErrors.ipynb"},"max_issues_repo_name":{"kind":"string","value":"SGBC/weatherdata"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c04ade1b104ef2d94b471bdd1baebdef4c9e9ce0"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-12-11T18:31:05.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-01-11T15:04:51.000Z"},"max_forks_repo_path":{"kind":"string","value":"Visualizations/AvarageDailyErrors.ipynb"},"max_forks_repo_name":{"kind":"string","value":"SGBC/weatherdata"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c04ade1b104ef2d94b471bdd1baebdef4c9e9ce0"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":1126.6844660194,"string":"1,126.684466"},"max_line_length":{"kind":"number","value":225396,"string":"225,396"},"alphanum_fraction":{"kind":"number","value":0.9517529309,"string":"0.951753"},"cells":{"kind":"list like","value":[[["# Load data.\nfrom METCOMP_utils import *\nstation_ids = ['40013','40010','25754','40003','24768','40005','23470','25786','24856','23658','40004','23659','25652','20949','40145','40007','40143','22234']\n\n# param_dict: Dictionary translating SMHI parameter names to corresponding parameters in reference.\n# Example: param_dict = {'t': 'ref_temperature', 'prec1h': 'ref_precipitation', ...}\nparam_dict = {'t': 'TM', 'prec1h': 'RR', 'r': 'UM', 'ws': 'FM2'}\n\nstart_date = datetime.date(2017, 3, 1)\nend_date = datetime.date(2020, 2, 29)\nMESAN_data = {}\nLANTMET_data = {}\nfor station in station_ids:\n print('Loading ' + station + '...')\n MESAN_data[station] = read_CSV(station, 'MESAN', start_date, end_date)\n LANTMET_data[station] = read_CSV(station, 'LANTMET', start_date, end_date)\n\n# Unit conversion if needed.\nfor station in station_ids:\n LANTMET_data[station][param_dict['r']] = LANTMET_data[station][param_dict['r']]/100","Loading 40013...\nLoading 40010...\nLoading 25754...\nLoading 40003...\nLoading 24768...\nLoading 40005...\nLoading 23470...\nLoading 25786...\nLoading 24856...\nLoading 23658...\nLoading 40004...\nLoading 23659...\nLoading 25652...\nLoading 20949...\nLoading 40145...\nLoading 40007...\nLoading 40143...\nLoading 22234...\n"],["import matplotlib.pyplot as plt\nimport numpy as np\n\nparam = 't'\n# param_dict: Dictionary translating SMHI parameter names to corresponding parameters in reference.\n# Example: param_dict = {'t': 'ref_temperature', 'prec1h': 'ref_precipitation', ...}\nparam_dict = {'t': 'TM', 'prec1h': 'RR', 'r': 'UM', 'ws': 'FM2'}\nseasons = {'spring': [3, 4, 5], 'summer': [6, 7, 8], 'fall': [9, 10, 11], 'winter': [12, 1, 2]}\nstations = ['40013','40010','25754','40003','24768','40005','23470','25786','24856','23658','40004','23659','25652','20949','40145','40007','40143','22234']\n\n\nylims = []\nfig, axs = plt.subplots(1, 4, figsize = (16, 16))\nfor station in stations:\n \n print('Working on ' + station + '...')\n \n df_MESAN = MESAN_data[station]\n df_LANTMET = LANTMET_data[station]\n \n MESAN_months = divide_months(df_MESAN)\n LANTMET_months = divide_months(df_LANTMET)\n \n index = 0\n for season in seasons:\n \n # Get season dataframe.\n MESAN_season = None\n LANTMET_season = None\n for month in seasons[season]:\n MESAN_season = pd.concat([MESAN_season, MESAN_months[month]], axis=0, ignore_index=True)\n LANTMET_season = pd.concat([LANTMET_season, LANTMET_months[month]], axis=0, ignore_index=True)\n \n MESAN_hours = divide_hours(MESAN_season)\n LANTMET_hours = divide_hours(LANTMET_season)\n \n hours = [h for h in range(0, 24)]\n avg_day_error = []\n for hour in hours:\n tmp_MESAN = MESAN_hours[hour][param].mean(skipna=True)\n tmp_LANTMET = LANTMET_hours[hour][param_dict[param]].mean(skipna=True)\n avg_day_error.append(abs(tmp_MESAN - tmp_LANTMET))\n \n \n #print(avg_day_error)\n \n if param == 'r':\n avg_day_error = np.array(avg_day_error)*100\n ylims.append(max(avg_day_error))\n axs[index].plot(hours, avg_day_error)\n \n index = index + 1\n\nindex = 0\nfor season in seasons:\n \n axs[index].set_xlabel('Hours', fontsize=16)\n axs[index].set_title(season, fontsize=16)\n axs[index].set_xlim([0, 23])\n axs[index].set_ylim([0, max(ylims)])\n x0,x1 = axs[index].get_xlim()\n y0,y1 = axs[index].get_ylim()\n axs[index].set_aspect(abs(x1-x0)/abs(y1-y0))\n \n index = index + 1\n \nif param == 't':\n axs[0].set_ylabel('Temperature (°C)', fontsize=16)\nif param == 'r':\n axs[0].set_ylabel('Relative humidity (%)', fontsize=16)\nif param == 'prec1h':\n axs[0].set_ylabel('Precipitation (mm)', fontsize=16)\nif param == 'ws':\n axs[0].set_ylabel('Wind speed (m/s)', fontsize=16)\n ","Working on 40013...\nWorking on 40010...\nWorking on 25754...\nWorking on 40003...\nWorking on 24768...\nWorking on 40005...\nWorking on 23470...\nWorking on 25786...\nWorking on 24856...\nWorking on 23658...\nWorking on 40004...\nWorking on 23659...\nWorking on 25652...\nWorking on 20949...\nWorking on 40145...\nWorking on 40007...\nWorking on 40143...\nWorking on 22234...\n"]]],"string":"[\n [\n [\n \"# Load data.\\nfrom METCOMP_utils import *\\nstation_ids = ['40013','40010','25754','40003','24768','40005','23470','25786','24856','23658','40004','23659','25652','20949','40145','40007','40143','22234']\\n\\n# param_dict: Dictionary translating SMHI parameter names to corresponding parameters in reference.\\n# Example: param_dict = {'t': 'ref_temperature', 'prec1h': 'ref_precipitation', ...}\\nparam_dict = {'t': 'TM', 'prec1h': 'RR', 'r': 'UM', 'ws': 'FM2'}\\n\\nstart_date = datetime.date(2017, 3, 1)\\nend_date = datetime.date(2020, 2, 29)\\nMESAN_data = {}\\nLANTMET_data = {}\\nfor station in station_ids:\\n print('Loading ' + station + '...')\\n MESAN_data[station] = read_CSV(station, 'MESAN', start_date, end_date)\\n LANTMET_data[station] = read_CSV(station, 'LANTMET', start_date, end_date)\\n\\n# Unit conversion if needed.\\nfor station in station_ids:\\n LANTMET_data[station][param_dict['r']] = LANTMET_data[station][param_dict['r']]/100\",\n \"Loading 40013...\\nLoading 40010...\\nLoading 25754...\\nLoading 40003...\\nLoading 24768...\\nLoading 40005...\\nLoading 23470...\\nLoading 25786...\\nLoading 24856...\\nLoading 23658...\\nLoading 40004...\\nLoading 23659...\\nLoading 25652...\\nLoading 20949...\\nLoading 40145...\\nLoading 40007...\\nLoading 40143...\\nLoading 22234...\\n\"\n ],\n [\n \"import matplotlib.pyplot as plt\\nimport numpy as np\\n\\nparam = 't'\\n# param_dict: Dictionary translating SMHI parameter names to corresponding parameters in reference.\\n# Example: param_dict = {'t': 'ref_temperature', 'prec1h': 'ref_precipitation', ...}\\nparam_dict = {'t': 'TM', 'prec1h': 'RR', 'r': 'UM', 'ws': 'FM2'}\\nseasons = {'spring': [3, 4, 5], 'summer': [6, 7, 8], 'fall': [9, 10, 11], 'winter': [12, 1, 2]}\\nstations = ['40013','40010','25754','40003','24768','40005','23470','25786','24856','23658','40004','23659','25652','20949','40145','40007','40143','22234']\\n\\n\\nylims = []\\nfig, axs = plt.subplots(1, 4, figsize = (16, 16))\\nfor station in stations:\\n \\n print('Working on ' + station + '...')\\n \\n df_MESAN = MESAN_data[station]\\n df_LANTMET = LANTMET_data[station]\\n \\n MESAN_months = divide_months(df_MESAN)\\n LANTMET_months = divide_months(df_LANTMET)\\n \\n index = 0\\n for season in seasons:\\n \\n # Get season dataframe.\\n MESAN_season = None\\n LANTMET_season = None\\n for month in seasons[season]:\\n MESAN_season = pd.concat([MESAN_season, MESAN_months[month]], axis=0, ignore_index=True)\\n LANTMET_season = pd.concat([LANTMET_season, LANTMET_months[month]], axis=0, ignore_index=True)\\n \\n MESAN_hours = divide_hours(MESAN_season)\\n LANTMET_hours = divide_hours(LANTMET_season)\\n \\n hours = [h for h in range(0, 24)]\\n avg_day_error = []\\n for hour in hours:\\n tmp_MESAN = MESAN_hours[hour][param].mean(skipna=True)\\n tmp_LANTMET = LANTMET_hours[hour][param_dict[param]].mean(skipna=True)\\n avg_day_error.append(abs(tmp_MESAN - tmp_LANTMET))\\n \\n \\n #print(avg_day_error)\\n \\n if param == 'r':\\n avg_day_error = np.array(avg_day_error)*100\\n ylims.append(max(avg_day_error))\\n axs[index].plot(hours, avg_day_error)\\n \\n index = index + 1\\n\\nindex = 0\\nfor season in seasons:\\n \\n axs[index].set_xlabel('Hours', fontsize=16)\\n axs[index].set_title(season, fontsize=16)\\n axs[index].set_xlim([0, 23])\\n axs[index].set_ylim([0, max(ylims)])\\n x0,x1 = axs[index].get_xlim()\\n y0,y1 = axs[index].get_ylim()\\n axs[index].set_aspect(abs(x1-x0)/abs(y1-y0))\\n \\n index = index + 1\\n \\nif param == 't':\\n axs[0].set_ylabel('Temperature (°C)', fontsize=16)\\nif param == 'r':\\n axs[0].set_ylabel('Relative humidity (%)', fontsize=16)\\nif param == 'prec1h':\\n axs[0].set_ylabel('Precipitation (mm)', fontsize=16)\\nif param == 'ws':\\n axs[0].set_ylabel('Wind speed (m/s)', fontsize=16)\\n \",\n \"Working on 40013...\\nWorking on 40010...\\nWorking on 25754...\\nWorking on 40003...\\nWorking on 24768...\\nWorking on 40005...\\nWorking on 23470...\\nWorking on 25786...\\nWorking on 24856...\\nWorking on 23658...\\nWorking on 40004...\\nWorking on 23659...\\nWorking on 25652...\\nWorking on 20949...\\nWorking on 40145...\\nWorking on 40007...\\nWorking on 40143...\\nWorking on 22234...\\n\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code"],"string":"[\n \"code\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code","code"]],"string":"[\n [\n \"code\",\n \"code\"\n ]\n]"}}},{"rowIdx":1458827,"cells":{"hexsha":{"kind":"string","value":"e7e40050746d4860af586c6a472d6d372606b35e"},"size":{"kind":"number","value":309718,"string":"309,718"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"time_series_analysis.ipynb"},"max_stars_repo_name":{"kind":"string","value":"EAC49/timeseries_homework"},"max_stars_repo_head_hexsha":{"kind":"string","value":"923292db1d49aaff796da205b56ed18dad7a04ad"},"max_stars_repo_licenses":{"kind":"list like","value":["ADSL"],"string":"[\n \"ADSL\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"time_series_analysis.ipynb"},"max_issues_repo_name":{"kind":"string","value":"EAC49/timeseries_homework"},"max_issues_repo_head_hexsha":{"kind":"string","value":"923292db1d49aaff796da205b56ed18dad7a04ad"},"max_issues_repo_licenses":{"kind":"list like","value":["ADSL"],"string":"[\n \"ADSL\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"time_series_analysis.ipynb"},"max_forks_repo_name":{"kind":"string","value":"EAC49/timeseries_homework"},"max_forks_repo_head_hexsha":{"kind":"string","value":"923292db1d49aaff796da205b56ed18dad7a04ad"},"max_forks_repo_licenses":{"kind":"list like","value":["ADSL"],"string":"[\n \"ADSL\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":195.529040404,"string":"195.52904"},"max_line_length":{"kind":"number","value":90432,"string":"90,432"},"alphanum_fraction":{"kind":"number","value":0.8677022324,"string":"0.867702"},"cells":{"kind":"list like","value":[[["import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n%matplotlib inline","_____no_output_____"]],[["# Return Forecasting: Read Historical Daily Yen Futures Data\nIn this notebook, you will load historical Dollar-Yen exchange rate futures data and apply time series analysis and modeling to determine whether there is any predictable behavior.","_____no_output_____"]],[["# Futures contract on the Yen-dollar exchange rate:\n# This is the continuous chain of the futures contracts that are 1 month to expiration\nyen_futures = pd.read_csv(\n Path(\"yen.csv\"), index_col=\"Date\", infer_datetime_format=True, parse_dates=True\n)\nyen_futures.head()","_____no_output_____"],["# Trim the dataset to begin on January 1st, 1990\nyen_futures = yen_futures.loc[\"1990-01-01\":, :]\nyen_futures.head()","_____no_output_____"]],[[" # Return Forecasting: Initial Time-Series Plotting","_____no_output_____"],[" Start by plotting the \"Settle\" price. Do you see any patterns, long-term and/or short?","_____no_output_____"]],[["# Plot just the \"Settle\" column from the dataframe:\nyen_futures.Settle.plot(figsize=[15,10],title='Yen Future Settle Prices',legend=True)","_____no_output_____"]],[["---","_____no_output_____"],["# Decomposition Using a Hodrick-Prescott Filter","_____no_output_____"],[" Using a Hodrick-Prescott Filter, decompose the Settle price into a trend and noise.","_____no_output_____"]],[["import statsmodels.api as sm\n\n# Apply the Hodrick-Prescott Filter by decomposing the \"Settle\" price into two separate series:\nnoise, trend = sm.tsa.filters.hpfilter(yen_futures['Settle'])","_____no_output_____"],["# Create a dataframe of just the settle price, and add columns for \"noise\" and \"trend\" series from above:\ndf = yen_futures['Settle'].to_frame()\ndf['noise'] = noise\ndf['trend'] = trend\ndf.tail()","_____no_output_____"],["# Plot the Settle Price vs. the Trend for 2015 to the present\ndf.loc['2015':].plot(y=['Settle', 'trend'], figsize= (15, 10), title = 'Settle vs. Trend')","_____no_output_____"],["# Plot the Settle Noise\ndf.noise.plot(figsize= (10, 5), title = 'Noise')","_____no_output_____"]],[["---","_____no_output_____"],["# Forecasting Returns using an ARMA Model","_____no_output_____"],["Using futures Settle *Returns*, estimate an ARMA model\n\n1. ARMA: Create an ARMA model and fit it to the returns data. Note: Set the AR and MA (\"p\" and \"q\") parameters to p=2 and q=1: order=(2, 1).\n2. Output the ARMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?\n3. Plot the 5-day forecast of the forecasted returns (the results forecast from ARMA model)","_____no_output_____"]],[["# Create a series using \"Settle\" price percentage returns, drop any nan\"s, and check the results:\n# (Make sure to multiply the pct_change() results by 100)\n# In this case, you may have to replace inf, -inf values with np.nan\"s\nreturns = (yen_futures[[\"Settle\"]].pct_change() * 100)\nreturns = returns.replace(-np.inf, np.nan).dropna()\nreturns.tail()","_____no_output_____"],["import statsmodels.api as sm\n\n# Estimate and ARMA model using statsmodels (use order=(2, 1))\nfrom statsmodels.tsa.arima_model import ARMA\narma_model = ARMA(returns['Settle'], order=(2,1))\n\n# Fit the model and assign it to a variable called results\nresults = arma_model.fit()","/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/arima_model.py:472: FutureWarning: \nstatsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have\nbeen deprecated in favor of statsmodels.tsa.arima.model.ARIMA (note the .\nbetween arima and model) and\nstatsmodels.tsa.SARIMAX. These will be removed after the 0.12 release.\n\nstatsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and\nis both well tested and maintained.\n\nTo silence this warning and continue using ARMA and ARIMA until they are\nremoved, use:\n\nimport warnings\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',\n FutureWarning)\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',\n FutureWarning)\n\n warnings.warn(ARIMA_DEPRECATION_WARN, FutureWarning)\n/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n This problem is unconstrained.\n"],["# Output model summary results:\nresults.summary()","_____no_output_____"],["# Plot the 5 Day Returns Forecast\npd.DataFrame(results.forecast(steps=5)[0]).plot(figsize= (10, 5), title='5 Day Returns Forcast')\n","_____no_output_____"]],[["---","_____no_output_____"],["# Forecasting the Settle Price using an ARIMA Model","_____no_output_____"],[" 1. Using the *raw* Yen **Settle Price**, estimate an ARIMA model.\n 1. Set P=5, D=1, and Q=1 in the model (e.g., ARIMA(df, order=(5,1,1))\n 2. P= # of Auto-Regressive Lags, D= # of Differences (this is usually =1), Q= # of Moving Average Lags\n 2. Output the ARIMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?\n 3. Construct a 5 day forecast for the Settle Price. What does the model forecast will happen to the Japanese Yen in the near term?","_____no_output_____"]],[["from statsmodels.tsa.arima_model import ARIMA\n\n# Estimate and ARIMA Model:\n# Hint: ARIMA(df, order=(p, d, q))\narima_model = ARIMA(yen_futures['Settle'], order=(5,1,1))\n\n# Fit the model\narima_results = arima_model.fit()","/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/arima_model.py:472: FutureWarning: \nstatsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have\nbeen deprecated in favor of statsmodels.tsa.arima.model.ARIMA (note the .\nbetween arima and model) and\nstatsmodels.tsa.SARIMAX. These will be removed after the 0.12 release.\n\nstatsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and\nis both well tested and maintained.\n\nTo silence this warning and continue using ARMA and ARIMA until they are\nremoved, use:\n\nimport warnings\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',\n FutureWarning)\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',\n FutureWarning)\n\n warnings.warn(ARIMA_DEPRECATION_WARN, FutureWarning)\n/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n ' ignored when e.g. forecasting.', ValueWarning)\n This problem is unconstrained.\n"],["# Output model summary results:\narima_results.summary()","_____no_output_____"],["# Plot the 5 Day Price Forecast\npd.DataFrame(arima_results.forecast(steps=5)[0]).plot(figsize= (10, 5), title='5 Day Futures Price Forcast')","_____no_output_____"]],[["---","_____no_output_____"],["# Volatility Forecasting with GARCH\n\nRather than predicting returns, let's forecast near-term **volatility** of Japanese Yen futures returns. Being able to accurately predict volatility will be extremely useful if we want to trade in derivatives or quantify our maximum loss.\n \nUsing futures Settle *Returns*, estimate an GARCH model\n\n1. GARCH: Create an GARCH model and fit it to the returns data. Note: Set the parameters to p=2 and q=1: order=(2, 1).\n2. Output the GARCH summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?\n3. Plot the 5-day forecast of the volatility.","_____no_output_____"]],[["from arch import arch_model","_____no_output_____"],["# Estimate a GARCH model:\ngarch_model = arch_model(returns, mean=\"Zero\", vol=\"GARCH\", p=2, q=1)\n\n# Fit the model\ngarch_results = garch_model.fit(disp=\"off\")","_____no_output_____"],["# Summarize the model results\ngarch_results.summary()","_____no_output_____"],["# Find the last day of the dataset\nlast_day = returns.index.max().strftime('%Y-%m-%d')\nlast_day","_____no_output_____"],["# Create a 5 day forecast of volatility\nforecast_horizon = 5\n# Start the forecast using the last_day calculated above\nforecasts = garch_results.forecast(start=last_day, horizon=forecast_horizon)","/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/arch/__future__/_utility.py:21: FutureWarning: \nThe default for reindex is True. After September 2021 this will change to\nFalse. Set reindex to True or False to silence this message. Alternatively,\nyou can use the import comment\n\nfrom arch.__future__ import reindexing\n\nto globally set reindex to True and silence this warning.\n\n FutureWarning,\n"],["# Annualize the forecast\nintermediate = np.sqrt(forecasts.variance.dropna() * 252)\nintermediate.head()","_____no_output_____"],["# Transpose the forecast so that it is easier to plot\nfinal = intermediate.dropna().T\nfinal.head()","_____no_output_____"],["# Plot the final forecast\nfinal.plot(figsize= (10, 5), title='5 Day Forecast of Volatility')","_____no_output_____"]],[["---","_____no_output_____"],["# Conclusions","_____no_output_____"],["Based on your time series analysis, would you buy the yen now?\n\nIs the risk of the yen expected to increase or decrease?\n\nBased on the model evaluation, would you feel confident in using these models for trading?","_____no_output_____"],["With the return forceast predicting decreasing returns and the volatility forecast predicting increased volatility, I do not believe it is appropiate to invest in the yen after anaylzing these forecasts.","_____no_output_____"]]],"string":"[\n [\n [\n \"import numpy as np\\nimport pandas as pd\\nfrom pathlib import Path\\n%matplotlib inline\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Return Forecasting: Read Historical Daily Yen Futures Data\\nIn this notebook, you will load historical Dollar-Yen exchange rate futures data and apply time series analysis and modeling to determine whether there is any predictable behavior.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Futures contract on the Yen-dollar exchange rate:\\n# This is the continuous chain of the futures contracts that are 1 month to expiration\\nyen_futures = pd.read_csv(\\n Path(\\\"yen.csv\\\"), index_col=\\\"Date\\\", infer_datetime_format=True, parse_dates=True\\n)\\nyen_futures.head()\",\n \"_____no_output_____\"\n ],\n [\n \"# Trim the dataset to begin on January 1st, 1990\\nyen_futures = yen_futures.loc[\\\"1990-01-01\\\":, :]\\nyen_futures.head()\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \" # Return Forecasting: Initial Time-Series Plotting\",\n \"_____no_output_____\"\n ],\n [\n \" Start by plotting the \\\"Settle\\\" price. Do you see any patterns, long-term and/or short?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Plot just the \\\"Settle\\\" column from the dataframe:\\nyen_futures.Settle.plot(figsize=[15,10],title='Yen Future Settle Prices',legend=True)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"---\",\n \"_____no_output_____\"\n ],\n [\n \"# Decomposition Using a Hodrick-Prescott Filter\",\n \"_____no_output_____\"\n ],\n [\n \" Using a Hodrick-Prescott Filter, decompose the Settle price into a trend and noise.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"import statsmodels.api as sm\\n\\n# Apply the Hodrick-Prescott Filter by decomposing the \\\"Settle\\\" price into two separate series:\\nnoise, trend = sm.tsa.filters.hpfilter(yen_futures['Settle'])\",\n \"_____no_output_____\"\n ],\n [\n \"# Create a dataframe of just the settle price, and add columns for \\\"noise\\\" and \\\"trend\\\" series from above:\\ndf = yen_futures['Settle'].to_frame()\\ndf['noise'] = noise\\ndf['trend'] = trend\\ndf.tail()\",\n \"_____no_output_____\"\n ],\n [\n \"# Plot the Settle Price vs. the Trend for 2015 to the present\\ndf.loc['2015':].plot(y=['Settle', 'trend'], figsize= (15, 10), title = 'Settle vs. Trend')\",\n \"_____no_output_____\"\n ],\n [\n \"# Plot the Settle Noise\\ndf.noise.plot(figsize= (10, 5), title = 'Noise')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"---\",\n \"_____no_output_____\"\n ],\n [\n \"# Forecasting Returns using an ARMA Model\",\n \"_____no_output_____\"\n ],\n [\n \"Using futures Settle *Returns*, estimate an ARMA model\\n\\n1. ARMA: Create an ARMA model and fit it to the returns data. Note: Set the AR and MA (\\\"p\\\" and \\\"q\\\") parameters to p=2 and q=1: order=(2, 1).\\n2. Output the ARMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?\\n3. Plot the 5-day forecast of the forecasted returns (the results forecast from ARMA model)\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"# Create a series using \\\"Settle\\\" price percentage returns, drop any nan\\\"s, and check the results:\\n# (Make sure to multiply the pct_change() results by 100)\\n# In this case, you may have to replace inf, -inf values with np.nan\\\"s\\nreturns = (yen_futures[[\\\"Settle\\\"]].pct_change() * 100)\\nreturns = returns.replace(-np.inf, np.nan).dropna()\\nreturns.tail()\",\n \"_____no_output_____\"\n ],\n [\n \"import statsmodels.api as sm\\n\\n# Estimate and ARMA model using statsmodels (use order=(2, 1))\\nfrom statsmodels.tsa.arima_model import ARMA\\narma_model = ARMA(returns['Settle'], order=(2,1))\\n\\n# Fit the model and assign it to a variable called results\\nresults = arma_model.fit()\",\n \"/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/arima_model.py:472: FutureWarning: \\nstatsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have\\nbeen deprecated in favor of statsmodels.tsa.arima.model.ARIMA (note the .\\nbetween arima and model) and\\nstatsmodels.tsa.SARIMAX. These will be removed after the 0.12 release.\\n\\nstatsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and\\nis both well tested and maintained.\\n\\nTo silence this warning and continue using ARMA and ARIMA until they are\\nremoved, use:\\n\\nimport warnings\\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',\\n FutureWarning)\\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',\\n FutureWarning)\\n\\n warnings.warn(ARIMA_DEPRECATION_WARN, FutureWarning)\\n/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\\n ' ignored when e.g. forecasting.', ValueWarning)\\n This problem is unconstrained.\\n\"\n ],\n [\n \"# Output model summary results:\\nresults.summary()\",\n \"_____no_output_____\"\n ],\n [\n \"# Plot the 5 Day Returns Forecast\\npd.DataFrame(results.forecast(steps=5)[0]).plot(figsize= (10, 5), title='5 Day Returns Forcast')\\n\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"---\",\n \"_____no_output_____\"\n ],\n [\n \"# Forecasting the Settle Price using an ARIMA Model\",\n \"_____no_output_____\"\n ],\n [\n \" 1. Using the *raw* Yen **Settle Price**, estimate an ARIMA model.\\n 1. Set P=5, D=1, and Q=1 in the model (e.g., ARIMA(df, order=(5,1,1))\\n 2. P= # of Auto-Regressive Lags, D= # of Differences (this is usually =1), Q= # of Moving Average Lags\\n 2. Output the ARIMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?\\n 3. Construct a 5 day forecast for the Settle Price. What does the model forecast will happen to the Japanese Yen in the near term?\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from statsmodels.tsa.arima_model import ARIMA\\n\\n# Estimate and ARIMA Model:\\n# Hint: ARIMA(df, order=(p, d, q))\\narima_model = ARIMA(yen_futures['Settle'], order=(5,1,1))\\n\\n# Fit the model\\narima_results = arima_model.fit()\",\n \"/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/arima_model.py:472: FutureWarning: \\nstatsmodels.tsa.arima_model.ARMA and statsmodels.tsa.arima_model.ARIMA have\\nbeen deprecated in favor of statsmodels.tsa.arima.model.ARIMA (note the .\\nbetween arima and model) and\\nstatsmodels.tsa.SARIMAX. These will be removed after the 0.12 release.\\n\\nstatsmodels.tsa.arima.model.ARIMA makes use of the statespace framework and\\nis both well tested and maintained.\\n\\nTo silence this warning and continue using ARMA and ARIMA until they are\\nremoved, use:\\n\\nimport warnings\\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',\\n FutureWarning)\\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',\\n FutureWarning)\\n\\n warnings.warn(ARIMA_DEPRECATION_WARN, FutureWarning)\\n/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\\n ' ignored when e.g. forecasting.', ValueWarning)\\n/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:583: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\\n ' ignored when e.g. forecasting.', ValueWarning)\\n This problem is unconstrained.\\n\"\n ],\n [\n \"# Output model summary results:\\narima_results.summary()\",\n \"_____no_output_____\"\n ],\n [\n \"# Plot the 5 Day Price Forecast\\npd.DataFrame(arima_results.forecast(steps=5)[0]).plot(figsize= (10, 5), title='5 Day Futures Price Forcast')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"---\",\n \"_____no_output_____\"\n ],\n [\n \"# Volatility Forecasting with GARCH\\n\\nRather than predicting returns, let's forecast near-term **volatility** of Japanese Yen futures returns. Being able to accurately predict volatility will be extremely useful if we want to trade in derivatives or quantify our maximum loss.\\n \\nUsing futures Settle *Returns*, estimate an GARCH model\\n\\n1. GARCH: Create an GARCH model and fit it to the returns data. Note: Set the parameters to p=2 and q=1: order=(2, 1).\\n2. Output the GARCH summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?\\n3. Plot the 5-day forecast of the volatility.\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"from arch import arch_model\",\n \"_____no_output_____\"\n ],\n [\n \"# Estimate a GARCH model:\\ngarch_model = arch_model(returns, mean=\\\"Zero\\\", vol=\\\"GARCH\\\", p=2, q=1)\\n\\n# Fit the model\\ngarch_results = garch_model.fit(disp=\\\"off\\\")\",\n \"_____no_output_____\"\n ],\n [\n \"# Summarize the model results\\ngarch_results.summary()\",\n \"_____no_output_____\"\n ],\n [\n \"# Find the last day of the dataset\\nlast_day = returns.index.max().strftime('%Y-%m-%d')\\nlast_day\",\n \"_____no_output_____\"\n ],\n [\n \"# Create a 5 day forecast of volatility\\nforecast_horizon = 5\\n# Start the forecast using the last_day calculated above\\nforecasts = garch_results.forecast(start=last_day, horizon=forecast_horizon)\",\n \"/Users/emilioacubero/opt/anaconda3/envs/dev/lib/python3.7/site-packages/arch/__future__/_utility.py:21: FutureWarning: \\nThe default for reindex is True. After September 2021 this will change to\\nFalse. Set reindex to True or False to silence this message. Alternatively,\\nyou can use the import comment\\n\\nfrom arch.__future__ import reindexing\\n\\nto globally set reindex to True and silence this warning.\\n\\n FutureWarning,\\n\"\n ],\n [\n \"# Annualize the forecast\\nintermediate = np.sqrt(forecasts.variance.dropna() * 252)\\nintermediate.head()\",\n \"_____no_output_____\"\n ],\n [\n \"# Transpose the forecast so that it is easier to plot\\nfinal = intermediate.dropna().T\\nfinal.head()\",\n \"_____no_output_____\"\n ],\n [\n \"# Plot the final forecast\\nfinal.plot(figsize= (10, 5), title='5 Day Forecast of Volatility')\",\n \"_____no_output_____\"\n ]\n ],\n [\n [\n \"---\",\n \"_____no_output_____\"\n ],\n [\n \"# Conclusions\",\n \"_____no_output_____\"\n ],\n [\n \"Based on your time series analysis, would you buy the yen now?\\n\\nIs the risk of the yen expected to increase or decrease?\\n\\nBased on the model evaluation, would you feel confident in using these models for trading?\",\n \"_____no_output_____\"\n ],\n [\n \"With the return forceast predicting decreasing returns and the volatility forecast predicting increased volatility, I do not believe it is appropiate to invest in the yen after anaylzing these forecasts.\",\n \"_____no_output_____\"\n ]\n ]\n]"},"cell_types":{"kind":"list like","value":["code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown","code","markdown"],"string":"[\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\",\n \"code\",\n \"markdown\"\n]"},"cell_type_groups":{"kind":"list like","value":[["code"],["markdown"],["code","code"],["markdown","markdown"],["code"],["markdown","markdown","markdown"],["code","code","code","code"],["markdown","markdown","markdown"],["code","code","code","code"],["markdown","markdown","markdown"],["code","code","code"],["markdown","markdown"],["code","code","code","code","code","code","code","code"],["markdown","markdown","markdown","markdown"]],"string":"[\n [\n \"code\"\n ],\n [\n \"markdown\"\n ],\n [\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\"\n ],\n [\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\",\n \"code\"\n ],\n [\n \"markdown\",\n \"markdown\",\n \"markdown\",\n \"markdown\"\n ]\n]"}}},{"rowIdx":1458828,"cells":{"hexsha":{"kind":"string","value":"e7e4102616503478306baf8084c3fff7bcb0dace"},"size":{"kind":"number","value":314350,"string":"314,350"},"ext":{"kind":"string","value":"ipynb"},"lang":{"kind":"string","value":"Jupyter Notebook"},"max_stars_repo_path":{"kind":"string","value":"lab1/Part2_Music_Generation.ipynb"},"max_stars_repo_name":{"kind":"string","value":"mukesh5237/introtodeeplearning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c847305ea5d50a0efb921b6c75438673d2cbc319"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"lab1/Part2_Music_Generation.ipynb"},"max_issues_repo_name":{"kind":"string","value":"mukesh5237/introtodeeplearning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c847305ea5d50a0efb921b6c75438673d2cbc319"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"lab1/Part2_Music_Generation.ipynb"},"max_forks_repo_name":{"kind":"string","value":"mukesh5237/introtodeeplearning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c847305ea5d50a0efb921b6c75438673d2cbc319"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"avg_line_length":{"kind":"number","value":184.8030570253,"string":"184.803057"},"max_line_length":{"kind":"number","value":220176,"string":"220,176"},"alphanum_fraction":{"kind":"number","value":0.6798918403,"string":"0.679892"},"cells":{"kind":"list like","value":[[["\n \n \n \n
\n \n Visit MIT Deep Learning\n Run in Google Colab\n View Source on GitHub
\n\n# Copyright Information","_____no_output_____"]],[["# Copyright 2021 MIT 6.S191 Introduction to Deep Learning. All Rights Reserved.\n# \n# Licensed under the MIT License. You may not use this file except in compliance\n# with the License. Use and/or modification of this code outside of 6.S191 must\n# reference:\n#\n# © MIT 6.S191: Introduction to Deep Learning\n# http://introtodeeplearning.com\n#","_____no_output_____"]],[["# Lab 1: Intro to TensorFlow and Music Generation with RNNs\n\n# Part 2: Music Generation with RNNs\n\nIn this portion of the lab, we will explore building a Recurrent Neural Network (RNN) for music generation. We will train a model to learn the patterns in raw sheet music in [ABC notation](https://en.wikipedia.org/wiki/ABC_notation) and then use this model to generate new music. ","_____no_output_____"],["## 2.1 Dependencies \nFirst, let's download the course repository, install dependencies, and import the relevant packages we'll need for this lab.","_____no_output_____"]],[["# Import Tensorflow 2.0\n#%tensorflow_version 2.x\nimport tensorflow as tf \n\n# Download and import the MIT 6.S191 package\n#!pip install mitdeeplearning\nimport mitdeeplearning as mdl\n\n# Import all remaining packages\nimport numpy as np\nimport os\nimport time\nimport functools\nfrom IPython import display as ipythondisplay\nfrom tqdm import tqdm\n#!apt-get install abcmidi timidity > /dev/null 2>&1\n\n# Check that we are using a GPU, if not switch runtimes using Runtime > Change Runtime Type > GPU\n#assert len(tf.config.list_physical_devices('GPU')) > 0","_____no_output_____"],["print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))","Num GPUs Available: 0\n"]],[["## 2.2 Dataset\n\n![Let's Dance!](http://33.media.tumblr.com/3d223954ad0a77f4e98a7b87136aa395/tumblr_nlct5lFVbF1qhu7oio1_500.gif)\n\nWe've gathered a dataset of thousands of Irish folk songs, represented in the ABC notation. Let's download the dataset and inspect it: \n","_____no_output_____"]],[["mdl.__file__","_____no_output_____"],["# Download the dataset\nsongs = mdl.lab1.load_training_data()\n\n# Print one of the songs to inspect it in greater detail!\nexample_song = songs[0]\nprint(\"\\nExample song: \")\nprint(example_song)","Found 817 songs in text\n\nExample song: \nX:1\nT:Alexander's\nZ: id:dc-hornpipe-1\nM:C|\nL:1/8\nK:D Major\n(3ABc|dAFA DFAd|fdcd FAdf|gfge fefd|(3efe (3dcB A2 (3ABc|!\ndAFA DFAd|fdcd FAdf|gfge fefd|(3efe dc d2:|!\nAG|FAdA FAdA|GBdB GBdB|Acec Acec|dfaf gecA|!\nFAdA FAdA|GBdB GBdB|Aceg fefd|(3efe dc d2:|!\n"],["songs[0]","_____no_output_____"],["len(songs)","_____no_output_____"]],[["We can easily convert a song in ABC notation to an audio waveform and play it back. Be patient for this conversion to run, it can take some time.","_____no_output_____"]],[["# Convert the ABC notation to audio file and listen to it\nmdl.lab1.play_song(example_song)","_____no_output_____"]],[["One important thing to think about is that this notation of music does not simply contain information on the notes being played, but additionally there is meta information such as the song title, key, and tempo. How does the number of different characters that are present in the text file impact the complexity of the learning problem? This will become important soon, when we generate a numerical representation for the text data.","_____no_output_____"]],[["# Join our list of song strings into a single string containing all songs\nsongs_joined = \"\\n\\n\".join(songs) \n\n# Find all unique characters in the joined string\nvocab = sorted(set(songs_joined))\nprint(\"There are\", len(vocab), \"unique characters in the dataset\")","There are 83 unique characters in the dataset\n"],["songs_joined","_____no_output_____"],["print(vocab)","['\\n', ' ', '!', '\"', '#', \"'\", '(', ')', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '<', '=', '>', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', ']', '^', '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '|']\n"]],[["## 2.3 Process the dataset for the learning task\n\nLet's take a step back and consider our prediction task. We're trying to train a RNN model to learn patterns in ABC music, and then use this model to generate (i.e., predict) a new piece of music based on this learned information. \n\nBreaking this down, what we're really asking the model is: given a character, or a sequence of characters, what is the most probable next character? We'll train the model to perform this task. \n\nTo achieve this, we will input a sequence of characters to the model, and train the model to predict the output, that is, the following character at each time step. RNNs maintain an internal state that depends on previously seen elements, so information about all characters seen up until a given moment will be taken into account in generating the prediction.","_____no_output_____"],["### Vectorize the text\n\nBefore we begin training our RNN model, we'll need to create a numerical representation of our text-based dataset. To do this, we'll generate two lookup tables: one that maps characters to numbers, and a second that maps numbers back to characters. Recall that we just identified the unique characters present in the text.","_____no_output_____"]],[["### Define numerical representation of text ###\n\n# Create a mapping from character to unique index.\n# For example, to get the index of the character \"d\", \n# we can evaluate `char2idx[\"d\"]`. \nchar2idx = {u:i for i, u in enumerate(vocab)}\n\n# Create a mapping from indices to characters. This is\n# the inverse of char2idx and allows us to convert back\n# from unique index to the character in our vocabulary.\nidx2char = np.array(vocab)","_____no_output_____"],["print(char2idx)","{'\\n': 0, ' ': 1, '!': 2, '\"': 3, '#': 4, \"'\": 5, '(': 6, ')': 7, ',': 8, '-': 9, '.': 10, '/': 11, '0': 12, '1': 13, '2': 14, '3': 15, '4': 16, '5': 17, '6': 18, '7': 19, '8': 20, '9': 21, ':': 22, '<': 23, '=': 24, '>': 25, 'A': 26, 'B': 27, 'C': 28, 'D': 29, 'E': 30, 'F': 31, 'G': 32, 'H': 33, 'I': 34, 'J': 35, 'K': 36, 'L': 37, 'M': 38, 'N': 39, 'O': 40, 'P': 41, 'Q': 42, 'R': 43, 'S': 44, 'T': 45, 'U': 46, 'V': 47, 'W': 48, 'X': 49, 'Y': 50, 'Z': 51, '[': 52, ']': 53, '^': 54, '_': 55, 'a': 56, 'b': 57, 'c': 58, 'd': 59, 'e': 60, 'f': 61, 'g': 62, 'h': 63, 'i': 64, 'j': 65, 'k': 66, 'l': 67, 'm': 68, 'n': 69, 'o': 70, 'p': 71, 'q': 72, 'r': 73, 's': 74, 't': 75, 'u': 76, 'v': 77, 'w': 78, 'x': 79, 'y': 80, 'z': 81, '|': 82}\n"],["idx2char","_____no_output_____"]],[["This gives us an integer representation for each character. Observe that the unique characters (i.e., our vocabulary) in the text are mapped as indices from 0 to `len(unique)`. Let's take a peek at this numerical representation of our dataset:","_____no_output_____"]],[["print('{')\nfor char,_ in zip(char2idx, range(5)):\n print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))\nprint(' ...\\n}')","{\n '\\n': 0,\n ' ' : 1,\n '!' : 2,\n '\"' : 3,\n '#' : 4,\n ...\n}\n"],["char2idx['A']","_____no_output_____"],["### Vectorize the songs string ###\n\n'''TODO: Write a function to convert the all songs string to a vectorized\n (i.e., numeric) representation. Use the appropriate mapping\n above to convert from vocab characters to the corresponding indices.\n\n NOTE: the output of the `vectorize_string` function \n should be a np.array with `N` elements, where `N` is\n the number of characters in the input string\n'''\n\ndef vectorize_string(string):\n return np.array([char2idx[string[i]] for i in range(len(string))])\n\nvectorized_songs = vectorize_string(songs_joined)","_____no_output_____"],["vectorized_songs","_____no_output_____"]],[["We can also look at how the first part of the text is mapped to an integer representation:","_____no_output_____"]],[["print ('{} ---- characters mapped to int ----> {}'.format(repr(songs_joined[:10]), vectorized_songs[:10]))\n# check that vectorized_songs is a numpy array\nassert isinstance(vectorized_songs, np.ndarray), \"returned result should be a numpy array\"","'X:1\\nT:Alex' ---- characters mapped to int ----> [49 22 13 0 45 22 26 67 60 79]\n"]],[["### Create training examples and targets\n\nOur next step is to actually divide the text into example sequences that we'll use during training. Each input sequence that we feed into our RNN will contain `seq_length` characters from the text. We'll also need to define a target sequence for each input sequence, which will be used in training the RNN to predict the next character. For each input, the corresponding target will contain the same length of text, except shifted one character to the right.\n\nTo do this, we'll break the text into chunks of `seq_length+1`. Suppose `seq_length` is 4 and our text is \"Hello\". Then, our input sequence is \"Hell\" and the target sequence is \"ello\".\n\nThe batch method will then let us convert this stream of character indices to sequences of the desired size.","_____no_output_____"]],[["### Batch definition to create training examples ###\n\ndef get_batch(vectorized_songs, seq_length, batch_size):\n # the length of the vectorized songs string\n n = vectorized_songs.shape[0] - 1\n # randomly choose the starting indices for the examples in the training batch\n idx = np.random.choice(n-seq_length, batch_size)\n\n '''TODO: construct a list of input sequences for the training batch'''\n input_batch = [vectorized_songs[idx[i]:idx[i]+seq_length] for i in range(batch_size)]\n '''TODO: construct a list of output sequences for the training batch'''\n output_batch = [vectorized_songs[idx[i]+1:idx[i]+seq_length+1] for i in range(batch_size)]\n\n # x_batch, y_batch provide the true inputs and targets for network training\n x_batch = np.reshape(input_batch, [batch_size, seq_length])\n y_batch = np.reshape(output_batch, [batch_size, seq_length])\n return x_batch, y_batch\n\n# Perform some simple tests to make sure your batch function is working properly! \ntest_args = (vectorized_songs, 10, 2)\nif not mdl.lab1.test_batch_func_types(get_batch, test_args) or \\\n not mdl.lab1.test_batch_func_shapes(get_batch, test_args) or \\\n not mdl.lab1.test_batch_func_next_step(get_batch, test_args): \n print(\"======\\n[FAIL] could not pass tests\")\nelse: \n print(\"======\\n[PASS] passed all tests!\")","[PASS] test_batch_func_types\n[PASS] test_batch_func_shapes\n[PASS] test_batch_func_next_step\n======\n[PASS] passed all tests!\n"]],[["For each of these vectors, each index is processed at a single time step. So, for the input at time step 0, the model receives the index for the first character in the sequence, and tries to predict the index of the next character. At the next timestep, it does the same thing, but the RNN considers the information from the previous step, i.e., its updated state, in addition to the current input.\n\nWe can make this concrete by taking a look at how this works over the first several characters in our text:","_____no_output_____"]],[["x_batch, y_batch = get_batch(vectorized_songs, seq_length=5, batch_size=1)\n\nfor i, (input_idx, target_idx) in enumerate(zip(np.squeeze(x_batch), np.squeeze(y_batch))):\n print(\"Step {:3d}\".format(i))\n print(\" input: {} ({:s})\".format(input_idx, repr(idx2char[input_idx])))\n print(\" expected output: {} ({:s})\".format(target_idx, repr(idx2char[target_idx])))","Step 0\n input: 10 ('.')\n expected output: 1 (' ')\nStep 1\n input: 1 (' ')\n expected output: 13 ('1')\nStep 2\n input: 13 ('1')\n expected output: 0 ('\\n')\nStep 3\n input: 0 ('\\n')\n expected output: 51 ('Z')\nStep 4\n input: 51 ('Z')\n expected output: 22 (':')\n"]],[["## 2.4 The Recurrent Neural Network (RNN) model","_____no_output_____"],["Now we're ready to define and train a RNN model on our ABC music dataset, and then use that trained model to generate a new song. We'll train our RNN using batches of song snippets from our dataset, which we generated in the previous section.\n\nThe model is based off the LSTM architecture, where we use a state vector to maintain information about the temporal relationships between consecutive characters. The final output of the LSTM is then fed into a fully connected [`Dense`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) layer where we'll output a softmax over each character in the vocabulary, and then sample from this distribution to predict the next character. \n\nAs we introduced in the first portion of this lab, we'll be using the Keras API, specifically, [`tf.keras.Sequential`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential), to define the model. Three layers are used to define the model:\n\n* [`tf.keras.layers.Embedding`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding): This is the input layer, consisting of a trainable lookup table that maps the numbers of each character to a vector with `embedding_dim` dimensions.\n* [`tf.keras.layers.LSTM`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM): Our LSTM network, with size `units=rnn_units`. \n* [`tf.keras.layers.Dense`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense): The output layer, with `vocab_size` outputs.\n\n\n\"Drawing\"/","_____no_output_____"],["Embedding vs one-hot encoding\n\nEmbedding - creates continuous representation of vocab, similar words stay close to each other, this representation is learnt, needs less dimenstionality than one-hot encoding to represent vocab\none hot encoding - leads to curse of dimensionality for a large vocab","_____no_output_____"],["### Define the RNN model\n\nNow, we will define a function that we will use to actually build the model.","_____no_output_____"]],[["def LSTM(rnn_units): \n return tf.keras.layers.LSTM(\n rnn_units, \n return_sequences=True, \n recurrent_initializer='glorot_uniform',\n recurrent_activation='sigmoid',\n stateful=True,\n )","_____no_output_____"]],[["The time has come! Fill in the `TODOs` to define the RNN model within the `build_model` function, and then call the function you just defined to instantiate the model!","_____no_output_____"]],[["len(vocab)","_____no_output_____"],["### Defining the RNN Model ###\n\n'''TODO: Add LSTM and Dense layers to define the RNN model using the Sequential API.'''\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n # Layer 1: Embedding layer to transform indices into dense vectors of a fixed embedding size\n # None is the sequence length, just a place holder\n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),\n\n # Layer 2: LSTM with `rnn_units` number of units. \n # TODO: Call the LSTM function defined above to add this layer.\n LSTM(rnn_units),\n\n # Layer 3: Dense (fully-connected) layer that transforms the LSTM output into the vocabulary size. \n # TODO: Add the Dense layer.\n # '''TODO: DENSE LAYER HERE'''\n tf.keras.layers.Dense(units=vocab_size)\n ])\n\n return model\n\n# Build a simple model with default hyperparameters. You will get the chance to change these later.\nmodel = build_model(len(vocab), embedding_dim=256, rnn_units=1024, batch_size=32)","_____no_output_____"]],[["### Test out the RNN model\n\nIt's always a good idea to run a few simple checks on our model to see that it behaves as expected. \n\nFirst, we can use the `Model.summary` function to print out a summary of our model's internal workings. Here we can check the layers in the model, the shape of the output of each of the layers, the batch size, etc.","_____no_output_____"]],[["model.summary()","Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (32, None, 256) 21248 \n_________________________________________________________________\nlstm_1 (LSTM) (32, None, 1024) 5246976 \n_________________________________________________________________\ndense_1 (Dense) (32, None, 83) 85075 \n=================================================================\nTotal params: 5,353,299\nTrainable params: 5,353,299\nNon-trainable params: 0\n_________________________________________________________________\n"]],[["We can also quickly check the dimensionality of our output, using a sequence length of 100. Note that the model can be run on inputs of any length.","_____no_output_____"]],[["x, y = get_batch(vectorized_songs, seq_length=100, batch_size=32)\npred = model(x)\nprint(\"Input shape: \", x.shape, \" # (batch_size, sequence_length)\")\nprint(\"Prediction shape: \", pred.shape, \"# (batch_size, sequence_length, vocab_size)\")","Input shape: (32, 100) # (batch_size, sequence_length)\nPrediction shape: (32, 100, 83) # (batch_size, sequence_length, vocab_size)\n"],["x","_____no_output_____"],["y","_____no_output_____"],["pred","_____no_output_____"]],[["### Predictions from the untrained model\n\nLet's take a look at what our untrained model is predicting.\n\nTo get actual predictions from the model, we sample from the output distribution, which is defined by a `softmax` over our character vocabulary. This will give us actual character indices. This means we are using a [categorical distribution](https://en.wikipedia.org/wiki/Categorical_distribution) to sample over the example prediction. This gives a prediction of the next character (specifically its index) at each timestep.\n\nNote here that we sample from this probability distribution, as opposed to simply taking the `argmax`, which can cause the model to get stuck in a loop.\n\nLet's try this sampling out for the first example in the batch.","_____no_output_____"]],[["# for batch 0, input sequence size: 100, so output sequence size: 100\nsampled_indices = tf.random.categorical(pred[0], num_samples=1)\nsampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()\nsampled_indices","_____no_output_____"],["x[0]","_____no_output_____"]],[["We can now decode these to see the text predicted by the untrained model:","_____no_output_____"]],[["print(\"Input: \\n\", repr(\"\".join(idx2char[x[0]])))\nprint()\nprint(\"Next Char Predictions: \\n\", repr(\"\".join(idx2char[sampled_indices])))","Input: \n 'AG|F2D DED|FEF GFG|!\\nA3 cAG|AGA cde|fed cAG|Ad^c d2:|!\\ne|f2d d^cd|f2a agf|e2c cBc|e2f gfe|!\\nf2g agf|'\n\nNext Char Predictions: \n '^VIQXjybPEk-^_G/>#T9ZLYJ\"CkYXBE\\nUUDBU