Modalities: Text
Formats: parquet (auto-converted)
Size: < 1K rows
Libraries: Datasets, pandas
Columns:
path: string (13 to 14 characters)
screenshot_names: sequence of strings (1 to 11 items)
code: string (1 to 7.42k characters)
cell_type: string (1 distinct value: "code")
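Since the card lists the Datasets and pandas libraries and the data is stored as Parquet, the rows below can be loaded programmatically. A minimal sketch; the repository id 'owner/dataset' is a placeholder, as the actual id is not shown on this page:

from datasets import load_dataset

# Placeholder repository id; substitute the real id of this dataset.
ds = load_dataset('owner/dataset', split='train')

# Each row carries: path, screenshot_names, code, cell_type.
df = ds.to_pandas()
print(df[['path', 'cell_type']].head())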
303338\cell_2
[ "application_vnd.jupyter.stderr_output_1.png", "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

sns.set_style('whitegrid')
zika = pd.read_csv('../input/cdc_zika.csv')
zika.groupby('location').size().reset_index().rename(columns={0: 'count'})
code
306027\cell_2
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
scripts = pd.read_sql_query("""
SELECT s.Id,
       cv.Title,
       COUNT(DISTINCT vo.Id) NumVotes,
       COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END) NumNonSelfVotes,
       CASE WHEN COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END)>0 THEN 1 ELSE 0 END HasNonSelfVotes,
       COUNT(DISTINCT v.Id) NumVersions,
       SUM(CASE WHEN r.WorkerStatus=2 THEN 1 ELSE 0 END) NumSuccessfulRuns,
       SUM(CASE WHEN r.WorkerStatus=3 THEN 1 ELSE 0 END) NumErroredRuns,
       SUM(CASE WHEN v.IsChange=1 THEN 1 ELSE 0 END) NumChangedVersions,
       SUM(v.LinesInsertedFromPrevious-v.LinesDeletedFromPrevious) Lines,
       SUM(v.LinesInsertedFromPrevious+v.LinesChangedFromPrevious) LinesAddedOrChanged,
       l.Name
FROM Scripts s
INNER JOIN ScriptVersions v ON v.ScriptId=s.Id
INNER JOIN ScriptVersions cv ON s.CurrentScriptVersionId=cv.Id
INNER JOIN ScriptRuns r ON r.ScriptVersionId=v.Id
INNER JOIN ScriptLanguages l ON v.ScriptLanguageId=l.Id
LEFT OUTER JOIN ScriptVotes vo ON vo.ScriptVersionId=v.Id
WHERE r.WorkerStatus != 4
  AND r.WorkerStatus != 5
GROUP BY s.Id,
         cv.Title,
         cv.Id,
         l.Name
ORDER BY cv.Id DESC
""", con)
scripts
code
306027\cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import sqlite3
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, FeatureUnion

con = sqlite3.connect('../input/database.sqlite')
scripts = pd.read_sql_query("""
SELECT s.Id,
       cv.Title,
       COUNT(DISTINCT vo.Id) NumVotes,
       COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END) NumNonSelfVotes,
       CASE WHEN COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END)>0 THEN 1 ELSE 0 END HasNonSelfVotes,
       COUNT(DISTINCT v.Id) NumVersions,
       SUM(CASE WHEN r.WorkerStatus=2 THEN 1 ELSE 0 END) NumSuccessfulRuns,
       SUM(CASE WHEN r.WorkerStatus=3 THEN 1 ELSE 0 END) NumErroredRuns,
       SUM(CASE WHEN v.IsChange=1 THEN 1 ELSE 0 END) NumChangedVersions,
       SUM(v.LinesInsertedFromPrevious-v.LinesDeletedFromPrevious) Lines,
       SUM(v.LinesInsertedFromPrevious+v.LinesChangedFromPrevious) LinesAddedOrChanged,
       l.Name
FROM Scripts s
INNER JOIN ScriptVersions v ON v.ScriptId=s.Id
INNER JOIN ScriptVersions cv ON s.CurrentScriptVersionId=cv.Id
INNER JOIN ScriptRuns r ON r.ScriptVersionId=v.Id
INNER JOIN ScriptLanguages l ON v.ScriptLanguageId=l.Id
LEFT OUTER JOIN ScriptVotes vo ON vo.ScriptVersionId=v.Id
WHERE r.WorkerStatus != 4
  AND r.WorkerStatus != 5
GROUP BY s.Id,
         cv.Title,
         cv.Id,
         l.Name
ORDER BY cv.Id DESC
""", con)

class RawColumnExtractor:

    def __init__(self, column):
        self.column = column

    def fit(self, *_):
        return self

    def transform(self, data):
        return data[[self.column]]

features = FeatureUnion([('NumSuccessfulRuns', RawColumnExtractor('NumSuccessfulRuns')), ('NumChangedVersions', RawColumnExtractor('NumChangedVersions'))])
pipeline = Pipeline([('feature_union', features), ('predictor', RandomForestClassifier())])
train = scripts
target_name = 'HasNonSelfVotes'
(x_train, x_test, y_train, y_test) = train_test_split(train, train[target_name], test_size=0.4, random_state=0)
pipeline.fit(x_train, y_train)
score = pipeline.score(x_test, y_test)
print('Score %f' % score)
code
306027\cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
scripts = pd.read_sql_query("""
SELECT s.Id,
       cv.Title,
       COUNT(DISTINCT vo.Id) NumVotes,
       COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END) NumNonSelfVotes,
       CASE WHEN COUNT(DISTINCT CASE WHEN vo.UserId!=s.AuthorUserId THEN vo.Id ELSE NULL END)>0 THEN 1 ELSE 0 END HasNonSelfVotes,
       COUNT(DISTINCT v.Id) NumVersions,
       SUM(CASE WHEN r.WorkerStatus=2 THEN 1 ELSE 0 END) NumSuccessfulRuns,
       SUM(CASE WHEN r.WorkerStatus=3 THEN 1 ELSE 0 END) NumErroredRuns,
       SUM(CASE WHEN v.IsChange=1 THEN 1 ELSE 0 END) NumChangedVersions,
       SUM(v.LinesInsertedFromPrevious-v.LinesDeletedFromPrevious) Lines,
       SUM(v.LinesInsertedFromPrevious+v.LinesChangedFromPrevious) LinesAddedOrChanged,
       l.Name
FROM Scripts s
INNER JOIN ScriptVersions v ON v.ScriptId=s.Id
INNER JOIN ScriptVersions cv ON s.CurrentScriptVersionId=cv.Id
INNER JOIN ScriptRuns r ON r.ScriptVersionId=v.Id
INNER JOIN ScriptLanguages l ON v.ScriptLanguageId=l.Id
LEFT OUTER JOIN ScriptVotes vo ON vo.ScriptVersionId=v.Id
WHERE r.WorkerStatus != 4
  AND r.WorkerStatus != 5
GROUP BY s.Id,
         cv.Title,
         cv.Id,
         l.Name
ORDER BY cv.Id DESC
""", con)
pd.read_sql_query("""
SELECT *
FROM ScriptLanguages
LIMIT 100
""", con)
code
309674\cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output

comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
plt.figure()
g.add_edges_from([(row['From'], row['To']) for (index, row) in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
nx.draw_networkx(g, spring_pos, with_labels=False, nodelist=d.keys(), node_size=[v * 10 for v in d.values()])
plt.savefig('LIKE_PLOT_GROUP1.png')
plt.clf()
code
309683\cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output

comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
plt.figure()
g.add_edges_from([(row['From'], row['To']) for (index, row) in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
nx.draw_networkx(g, spring_pos, with_labels=False, nodelist=d.keys(), node_size=[v * 10 for v in d.values()])
plt.savefig('LIKE_PLOT_GROUP1.png')
plt.clf()
code
309683\cell_4
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output

comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
g.add_edges_from([(row['From'], row['To']) for (index, row) in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
plt.clf()
g.number_of_nodes()
spring_pos = nx.spring_layout(g, scale=2)
nx.draw(g, spring_pos, with_labels=False, nodelist=d.keys(), node_size=[v * 5 for v in d.values()])
code
309683\cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output

comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
g = nx.Graph()
g.add_edges_from([(row['From'], row['To']) for (index, row) in finalResult.iterrows()])
d = nx.degree(g)
spring_pos = nx.spring_layout(g)
plt.axis('off')
plt.clf()
# Write the graph out as a d3-compatible JSON file.
f = open('g.json', 'w')
f.write('{"nodes":[')
str1 = ''
for i in finalResult['From'].unique():
    str1 += '{"name":"' + str(i) + '","group":' + str(1) + '},'
f.write(str1[:-1])
f.write('],"links":[')
str1 = ''
for i in range(len(finalResult)):
    str1 += '{"source":' + str(finalResult['From'][i]) + ',"target":' + str(finalResult['To'][i]) + ',"value":' + str(finalResult['Weight'][i]) + '},'
f.write(str1[:-1])
f.write(']}')
f.close()  # original had `f.close` without parentheses, which never closed the file
h1 = '''
<!DOCTYPE html>
<meta charset="utf-8">
<style>
.link {stroke: #ccc;}
.node text {pointer-events: none; font: 10px sans-serif;}
</style>
<body>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script>
var width = 800, height = 800;
var color = d3.scale.category20();
var force = d3.layout.force()
    .charge(-120)
    .linkDistance(80)
    .size([width, height]);
var svg = d3.select("body").append("svg")
    .attr("width", width)
    .attr("height", height);
d3.json("g.json", function(error, graph) {
    if (error) throw error;
    force.nodes(graph.nodes)
        .links(graph.links)
        .start();
    var link = svg.selectAll(".link")
        .data(graph.links)
        .enter().append("line")
        .attr("class", "link")
        .style("stroke-width", function (d) {return Math.sqrt(d.value);});
    var node = svg.selectAll(".node")
        .data(graph.nodes)
        .enter().append("g")
        .attr("class", "node")
        .call(force.drag);
    node.append("circle")
        .attr("r", 8)
        .style("fill", function (d) {return color(d.group);})
    node.append("text")
        .attr("dx", 10)
        .attr("dy", ".35em")
        .text(function(d) { return d.name });
    force.on("tick", function () {
        link.attr("x1", function (d) {return d.source.x;})
            .attr("y1", function (d) {return d.source.y;})
            .attr("x2", function (d) {return d.target.x;})
            .attr("y2", function (d) {return d.target.y;});
        d3.selectAll("circle").attr("cx", function (d) {return d.x;})
            .attr("cy", function (d) {return d.y;});
        d3.selectAll("text").attr("x", function (d) {return d.x;})
            .attr("y", function (d) {return d.y;});
    });
});
</script>
'''
f = open('output.html', 'w')
f.write(h1)
f.close()  # original had `f.close` without parentheses
code
311174\cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
311174\cell_3
[ "application_vnd.jupyter.stderr_output_1.png", "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.head(3)
code
311174\cell_4
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.location.value_counts()[:30].plot(kind='bar', figsize=(12, 7))
plt.title('Number of locations reported - Top 30')
code
311174\cell_5
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df[df.data_field == 'confirmed_male'].value.plot()
df[df.data_field == 'confirmed_female'].value.plot().legend(('Male', 'Female'), loc='best')
plt.title('Confirmed Male vs Female cases')
code
311500\cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
311500\cell_2
[ "application_vnd.jupyter.stderr_output_1.png", "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.head(3)
code
311500\cell_3
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.location.value_counts()[:30].plot(kind='bar', figsize=(12, 7))
plt.title('Number of locations reported - Top 30')
code
311500\cell_4
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df[df.data_field == 'confirmed_male'].value.plot()
df[df.data_field == 'confirmed_female'].value.plot().legend(('Male', 'Female'), loc='best')
plt.title('Confirmed Male vs Female cases')
code
311500\cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.data_field.unique()
code
311500\cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.data_field.unique()
age_groups = ('confirmed_age_under_1', 'confirmed_age_1-4', 'confirmed_age_5-9', 'confirmed_age_10-14', 'confirmed_age_15-19', 'confirmed_age_20-24', 'confirmed_age_25-34', 'confirmed_age_35-49', 'confirmed_age_50-59', 'confirmed_age_60-64', 'confirmed_age_60_plus')
for (i, age_group) in enumerate(age_groups):
    print(age_group)
    print(df[df.data_field == age_group].value)
    print('')
code
312349\cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
312349\cell_3
[ "application_vnd.jupyter.stderr_output_1.png", "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.head(3)
code
312349\cell_4
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df.location.value_counts()[:30].plot(kind='bar', figsize=(12, 7))
plt.title('Number of locations reported - Top 30')
code
312349\cell_5
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
df[df.data_field == 'confirmed_male'].value.plot()
df[df.data_field == 'confirmed_female'].value.plot().legend(('Male', 'Female'), loc='best')
plt.title('Confirmed Male vs Female cases')
code
312349\cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
age_groups = ('confirmed_age_under_1', 'confirmed_age_1-4', 'confirmed_age_5-9', 'confirmed_age_10-14', 'confirmed_age_15-19', 'confirmed_age_20-24', 'confirmed_age_25-34', 'confirmed_age_35-49', 'confirmed_age_50-59', 'confirmed_age_60-64', 'confirmed_age_60_plus')
for (i, age_group) in enumerate(age_groups):
    print(age_group)
    print(df[df.data_field == age_group].value)
    print('')
code
312349\cell_8
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cdc_zika.csv', parse_dates=['report_date'], infer_datetime_format=True, index_col=0)
age_groups = ('confirmed_age_under_1', 'confirmed_age_1-4', 'confirmed_age_5-9', 'confirmed_age_10-14', 'confirmed_age_15-19', 'confirmed_age_20-24', 'confirmed_age_25-34', 'confirmed_age_35-49', 'confirmed_age_50-59', 'confirmed_age_60-64', 'confirmed_age_60_plus')
symptoms = ['confirmed_fever', 'confirmed_acute_fever', 'confirmed_arthralgia', 'confirmed_arthritis', 'confirmed_rash', 'confirmed_conjunctivitis', 'confirmed_eyepain', 'confirmed_headache', 'confirmed_malaise']
fig = plt.figure(figsize=(13, 13))
for symptom in symptoms:
    df[df.data_field == symptom].value.plot()
plt.legend(symptoms, loc='best')
plt.title('Understanding symptoms of zika virus')
code
316827\cell_13
[ "text_html_output_1.png" ]
import pandas as pd

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
data.head()
code
316827\cell_16
[ "image_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
sns.pairplot(data, hue='gid')
code
316827\cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""
    # Body not included in this row; the original cell had a stray extra quote in the docstring.

conf_interval('likes')
code
316827\cell_24
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""

def compare_means(field):
    """Mann–Whitney test to compare mean values level"""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    (rejected, p_corrected, a1, a2) = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('likes')
print(compare_means('likes'))
code
316827\cell_27
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""

def compare_means(field):
    """Mann–Whitney test to compare mean values level"""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    (rejected, p_corrected, a1, a2) = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('shares')
print(compare_means('shares'))
code
316827\cell_30
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""

def compare_means(field):
    """Mann–Whitney test to compare mean values level"""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    (rejected, p_corrected, a1, a2) = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('comments')
print(compare_means('comments'))
code
316827\cell_33
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""

def compare_means(field):
    """Mann–Whitney test to compare mean values level"""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    (rejected, p_corrected, a1, a2) = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

conf_interval('msg_len')
print(compare_means('msg_len'))
code
316827\cell_37
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""

def compare_means(field):
    """Mann–Whitney test to compare mean values level"""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    (rejected, p_corrected, a1, a2) = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

shared = data[data.shares > data.shares.quantile(0.98)][data.shares > data.likes * 10][['msg', 'shares']]
top = 10
print('top %d out of %d' % (top, shared.shape[0]))
sorted_data = shared.sort_values(by='shares', ascending=False)[:top]
for i in sorted_data.index.values:
    print('shares:', sorted_data.shares[i], '\n', 'message:', sorted_data.msg[i][:200], '\n')
code
316827\cell_40
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.weightstats import zconfint

posts = pd.read_csv('../input/post.csv', parse_dates=['timeStamp'])
comments = pd.read_csv('../input/comment.csv')
com_count = comments.groupby('pid').count()['cid']
data = posts.join(com_count, on='pid', rsuffix='c')[['msg', 'likes', 'shares', 'cid', 'gid']]
data.columns = ['msg', 'likes', 'shares', 'comments', 'gid']
data['msg_len'] = data.msg.apply(len)
data.gid = data.gid.map({117291968282998: 1, 25160801076: 2, 1443890352589739: 3})
data.fillna(0, inplace=True)
park = data[data.gid == 1]
town = data[data.gid == 2]
free = data[data.gid == 3]

def conf_interval(field):
    """Calculate confidence interval for given field"""

def compare_means(field):
    """Mann–Whitney test to compare mean values level"""
    mapping = {1: 'EPH', 2: 'UCT', 3: 'FSZ'}
    comparison = pd.DataFrame(columns=['group1', 'group2', 'p_value'])
    for i in range(1, 4):
        for j in range(1, 4):
            if i >= j:
                continue
            p = mannwhitneyu(data[data.gid == i][field], data[data.gid == j][field])[1]
            comparison = comparison.append({'group1': mapping[i], 'group2': mapping[j], 'p_value': p}, ignore_index=True)
    (rejected, p_corrected, a1, a2) = multipletests(comparison.p_value, alpha=0.05, method='holm')
    comparison['p_value_corrected'] = p_corrected
    comparison['rejected'] = rejected
    return comparison

shared = data[data.shares > data.shares.quantile(0.98)][data.shares > data.likes * 10][['msg', 'shares']]
top = 10
sorted_data = shared.sort_values(by='shares', ascending=False)[:top]
likes = data[data.likes > data.likes.quantile(0.98)][data.likes > data.shares * 100][['msg', 'likes']]
print('top %d out of %d' % (top, likes.shape[0]))
sorted_data = likes.sort_values(by='likes', ascending=False)[:top]
for i in sorted_data.index.values:
    print('likes:', sorted_data.likes[i], '\n', 'message:', sorted_data.msg[i][:300], '\n')
code
318069\cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
print('%s is ranked %.0f with %d attacks resulting to %d deaths and %d injuries' % (country, country_rank, country_attacks, country_killed, country_injured))
code
318069\cell_14
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.City.value_counts().plot(kind='bar', figsize=(17, 7))
plt.title('Number of attacks by city')
code
318069\cell_17
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=True)
code
318069\cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
print("Attack with most victims happened on %s on %s with %d killed, %d injuries with a total of %d victims with the following article: \n'%s' \n" % (most_victim.City.values[0], most_victim.index.strftime('%B %d,%Y')[0], most_victim.Killed, most_victim.Injured, most_victim.Victims, '%s' % most_victim.Description.values[0]))
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
print("Attack with the most deaths happened on %s on %s with %d killed, %d injuries with a total of %d victims with the following article: \n'%s' \n" % (most_killed.City.values[0], most_killed.index.strftime('%B %d,%Y')[0], most_killed.Killed, most_killed.Injured, most_killed.Victims, '%s' % most_killed.Description.values[0]))
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
print("Attack with the most injuries happened on %s on %s with %d killed, %d injuries with a total of %d victims with the following article: \n'%s' \n" % (most_injuries.City.values[0], most_injuries.index.strftime('%B %d,%Y')[0], most_injuries.Killed, most_injuries.Injured, most_injuries.Victims, '%s' % most_injuries.Description.values[0]))
code
318069\cell_23
[ "image_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
dfc.groupby(dfc.index.year).sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=False)
code
318069\cell_26
[ "image_output_1.png", "image_output_2.png", "text_plain_output_1.png" ]
from matplotlib.pylab import rcParams
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
dfc.groupby(dfc.index.year).sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=False)
killedbyday = dfc.groupby([dfc.index.map(lambda x: x.weekday), dfc.index.year], sort=True).agg({'Killed': 'sum'})
rcParams['figure.figsize'] = (20, 10)
killedbyday.unstack(level=0).plot(kind='bar', subplots=False)
killedbyday.unstack(level=1).plot(kind='bar', subplots=False)
code
318069\cell_27
[ "image_output_1.png", "image_output_2.png", "text_plain_output_1.png" ]
from matplotlib.pylab import rcParams
import matplotlib.pylab as plt
import pandas as pd

country = 'Philippines'
df = pd.read_csv('../input/attacks_data_UTF8.csv', encoding='latin1', parse_dates=['Date'], infer_datetime_format=True, index_col=1)
if country is not None:
    dfc = df.loc[df['Country'] == country]
else:
    dfc = df
country_rank = df.Country.value_counts().rank(numeric_only=True, ascending=False).loc[country]
country_attacks = df.Country.value_counts()[country]
country_killed = dfc.Killed.sum()
country_injured = dfc.Injured.sum()
dfc.groupby('City').sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=True)
most_victim = dfc.sort_values(by='Victims', ascending=False).head(1)
most_killed = dfc.sort_values(by='Killed', ascending=False).head(1)
most_injuries = dfc.sort_values(by='Injured', ascending=False).head(1)
dfc.groupby(dfc.index.year).sum()[['Victims', 'Killed', 'Injured']].sort_values(by='Victims', ascending=0).plot(kind='bar', figsize=(17, 7), subplots=False)
killedbyday = dfc.groupby([dfc.index.map(lambda x: x.weekday), dfc.index.year], sort=True).agg({'Killed': 'sum'})
rcParams['figure.figsize'] = (20, 10)
killedbymonth = dfc.groupby([dfc.index.map(lambda x: x.month), dfc.index.year], sort=True).agg({'Killed': 'sum'})
rcParams['figure.figsize'] = (20, 10)
killedbymonth.unstack(level=0).plot(kind='bar', subplots=False)
killedbymonth.unstack(level=1).plot(kind='bar', subplots=False)
code
318221\cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
rmember.head(3)
code
318221\cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment['gid'].value_counts()
code
318221\cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment[(comment.gid == '117291968282998') & (comment.rid == '')]['name'].value_counts().head(10)
code
318221\cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment[(comment.gid == '117291968282998') & (comment.rid != '')]['rname'].value_counts().head(10)
code
318372\cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.head(4)
code
318372\cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.gid = comment.gid.map({'117291968282998': 'EPH', '25160801076': 'UCT', '1443890352589739': 'FSZ'})
comment['gid'].value_counts()
code
318372\cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.gid = comment.gid.map({'117291968282998': 'EPH', '25160801076': 'UCT', '1443890352589739': 'FSZ'})
comment['gid'].value_counts()
comment[(comment.gid == 'EPH') & (comment.rid == '')]['name'].value_counts().head(10)
code
318372\cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import sqlite3

con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FROM member', con)
comment = pd.merge(comment, rmember, left_on='rid', right_on='rid', how='left')
comment.gid = comment.gid.map({'117291968282998': 'EPH', '25160801076': 'UCT', '1443890352589739': 'FSZ'})
comment['gid'].value_counts()
comment[(comment.gid == 'EPH') & (comment.rid != '')]['rname'].value_counts().head(10)
code
320866\cell_3
[ "image_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dateutil.parser import parse

data = pd.read_csv('../input/3-Airplane_Crashes_Since_1908.txt')
years = []
for i in range(len(data)):
    years.append(parse(data.Date[i]).year)
data.Fatalities = data.Fatalities.fillna(data.Fatalities.mean())
temp = zip(years, data.Fatalities)
temp = [(x, y) for (x, y) in temp if y > 50]
temp = pd.DataFrame(temp, columns=['massive_years', 'Fatalities'])
counts = temp.massive_years.value_counts()
plt.figure(figsize=(11, 7))
plt.bar(counts.index, counts.values)
plt.ylabel('Number of Massive Crashes(fatalities>50)', fontsize=15)
plt.xlabel('Year', fontsize=15)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
code
320908\cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import sqlite3

conn = sqlite3.connect('../input/database.sqlite')
teams = pd.read_sql_query('select * from Teams', conn)
users = pd.read_sql_query('select * from Users', conn)
teammembers = pd.read_sql_query('select * from TeamMemberships', conn)
teams_q = teammembers.groupby('TeamId').UserId.count()
teams_q = teams_q[teams_q > 1].reset_index()
teammembers_cut = teammembers.merge(teams_q, on='TeamId')
users_q = teammembers_cut.groupby('UserId_x').TeamId.count().reset_index()
teammembers_cut = teammembers_cut.merge(users_q, left_on='UserId_x', right_on='UserId_x')
teammembers_cut = teammembers_cut.merge(teams, left_on='TeamId_x', right_on='Id')
teammembers_cut = teammembers_cut.merge(users, left_on='UserId_x', right_on='Id')
tm4graph = teammembers_cut[['TeamId_x', 'UserId_x']]
tm4graph['TeamId_x'] = 'Team_' + tm4graph['TeamId_x'].astype('str')
tm4graph['UserId_x'] = 'User_' + tm4graph['UserId_x'].astype('str')
code
320908\cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import sqlite3
import scipy as sp
from scipy.sparse import spdiags, coo_matrix
import networkx as nx
import matplotlib.pyplot as plt
import plotly
# Assumed import: the flattened cell uses Scatter, Layout, etc. without showing
# where they come from; in plotly 1.x/2.x they lived in plotly.graph_objs.
from plotly.graph_objs import Scatter, Line, Marker, Layout, Figure, Data, Font, XAxis, YAxis, Margin, Annotation, Annotations

conn = sqlite3.connect('../input/database.sqlite')
teams = pd.read_sql_query('select * from Teams', conn)
users = pd.read_sql_query('select * from Users', conn)
teammembers = pd.read_sql_query('select * from TeamMemberships', conn)
teams_q = teammembers.groupby('TeamId').UserId.count()
teams_q = teams_q[teams_q > 1].reset_index()
teammembers_cut = teammembers.merge(teams_q, on='TeamId')
users_q = teammembers_cut.groupby('UserId_x').TeamId.count().reset_index()
teammembers_cut = teammembers_cut.merge(users_q, left_on='UserId_x', right_on='UserId_x')
teammembers_cut = teammembers_cut.merge(teams, left_on='TeamId_x', right_on='Id')
teammembers_cut = teammembers_cut.merge(users, left_on='UserId_x', right_on='Id')
tm4graph = teammembers_cut[['TeamId_x', 'UserId_x']]
tm4graph['TeamId_x'] = 'Team_' + tm4graph['TeamId_x'].astype('str')
tm4graph['UserId_x'] = 'User_' + tm4graph['UserId_x'].astype('str')

def forceatlas2_layout(G, iterations=10, linlog=False, pos=None, nohubs=False, kr=0.001, k=None, dim=2):
    """
    Options values are
    g            The graph to layout
    iterations   Number of iterations to do
    linlog       Whether to use linear or log repulsion
    random_init  Start with a random position; if false, start with FR
    avoidoverlap Whether to avoid overlap of points
    degreebased  Degree based repulsion
    """
    for n in G:
        G.node[n]['prevcs'] = 0
        G.node[n]['currcs'] = 0
    A = nx.to_scipy_sparse_matrix(G, dtype='f')
    (nnodes, _) = A.shape
    try:
        A = A.tolil()
    except Exception as e:
        A = coo_matrix(A).tolil()
    if pos is None:
        pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype)
    else:
        pos = pos.astype(A.dtype)
    if k is None:
        k = np.sqrt(1.0 / nnodes)
    t = 0.1
    dt = t / float(iterations + 1)
    displacement = np.zeros((dim, nnodes))
    for iteration in range(iterations):
        displacement *= 0
        for i in range(A.shape[0]):
            delta = (pos[i] - pos).T
            distance = np.sqrt((delta ** 2).sum(axis=0))
            distance = np.where(distance < 0.01, 0.01, distance)
            Ai = np.asarray(A.getrowview(i).toarray())
            Dist = k * k / distance ** 2
            if nohubs:
                Dist = Dist / float(Ai.sum(axis=1) + 1)
            if linlog:
                Dist = np.log(Dist + 1)
            displacement[:, i] += (delta * (Dist - Ai * distance / k)).sum(axis=1)
        length = np.sqrt((displacement ** 2).sum(axis=0))
        length = np.where(length < 0.01, 0.01, length)
        pos += (displacement * t / length).T
        t -= dt
    return dict(zip(G, pos))

axis = dict(showline=False, zeroline=False, showgrid=False, showticklabels=False, title='')
layout = Layout(title='Kaggle teams/users universe', font=Font(size=12), showlegend=True, autosize=False, width=800, height=800, xaxis=XAxis(axis), yaxis=YAxis(axis), margin=Margin(l=40, r=40, b=85, t=100), hovermode='closest', annotations=Annotations([Annotation(showarrow=False, text='', xref='paper', yref='paper', x=0, y=-0.1, xanchor='left', yanchor='bottom', font=Font(size=14))]))
edges_to_use = 22000
G = nx.Graph()
G.add_edges_from(tm4graph.values[0:edges_to_use])
pos = forceatlas2_layout(G, iterations=300, nohubs=True)
N = G.number_of_nodes()
E = G.edges()
labels = G.nodes()
Xv_teams = [pos[k][0] for k in labels if 'Team' in k]
Yv_teams = [pos[k][1] for k in labels if 'Team' in k]
Xv_users = [pos[k][0] for k in labels if 'User' in k]
Yv_users = [pos[k][1] for k in labels if 'User' in k]
labels_team = [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.TeamId_x == int(k.replace('Team_', '')), 'TeamName'].values[0] for k in labels if 'Team' in k]
labels_users = [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.UserId_x == int(k.replace('User_', '')), 'DisplayName'].values[0] for k in labels if 'User' in k]
Xed = []
Yed = []
for edge in E:
    Xed += [pos[edge[0]][0], pos[edge[1]][0], None]
    Yed += [pos[edge[0]][1], pos[edge[1]][1], None]
trace3 = Scatter(x=Xed, y=Yed, mode='lines', line=Line(color='rgb(200,200,200)', width=2), name='Links', hoverinfo='none')
trace4 = Scatter(x=Xv_teams, y=Yv_teams, mode='markers', name='Teams', marker=Marker(symbol='dot', size=[teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.TeamId_x == int(k.replace('Team_', '')), 'UserId_y'].values[0] for k in labels if 'Team' in k], color='rgb(146,209,81)', line=Line(color='rgb(50,50,50)', width=0.5)), text=map(lambda x: ['Team: ' + u''.join(x[0]).encode('utf8').strip() + '<br>Users: ' + str(','.join(x[1]).encode('utf8')) + '<br>'], zip(labels_team, [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.TeamId_x == int(k.replace('Team_', '')), 'DisplayName'].values.tolist() for k in labels if 'Team' in k])), hoverinfo='text')
trace5 = Scatter(x=Xv_users, y=Yv_users, mode='markers', name='Users', marker=Marker(symbol='dot', size=[teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.UserId_x == int(k.replace('User_', '')), 'TeamId_y'].values[0] * 0.5 for k in labels if 'User' in k], color='#000000', line=Line(color='rgb(50,50,50)', width=0.5)), text=map(lambda x: ['User: ' + u''.join(x[0]).encode('utf8').strip() + '<br>Teams: ' + str(','.join(x[1]).encode('utf8')) + '<br>'], zip(labels_users, [teammembers_cut.iloc[0:edges_to_use, :].loc[teammembers_cut.UserId_x == int(k.replace('User_', '')), 'TeamName'].values.tolist() for k in labels if 'User' in k])), hoverinfo='text')
data1 = Data([trace3, trace4, trace5])
fig1 = Figure(data=data1, layout=layout)
plotly.offline.iplot(fig1)
code
322662\cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import datetime
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))

def dateparse(x):
    try:
        print('Inside DateParse')
        return pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
    except TypeError as err:
        print('My exception occurred, value:', err.value)
        return None

d = pd.read_csv('../input/trainView.csv', header=0, names=['train_id', 'status', 'next_station', 'service', 'dest', 'lon', 'lat', 'source', 'track_change', 'track', 'date', 'timeStamp0', 'timeStamp1'], dtype={'train_id': str, 'status': str, 'next_station': str, 'service': str, 'dest': str, 'lon': str, 'lat': str, 'source': str, 'track_change': str, 'track': str, 'date': str, 'timeStamp0': datetime.datetime, 'timeStamp1': datetime.datetime})
code
322662\cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import datetime
from subprocess import check_output

def dateparse(x):
    try:
        return pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
    except TypeError as err:
        return None

d = pd.read_csv('../input/trainView.csv', header=0, names=['train_id', 'status', 'next_station', 'service', 'dest', 'lon', 'lat', 'source', 'track_change', 'track', 'date', 'timeStamp0', 'timeStamp1'], dtype={'train_id': str, 'status': str, 'next_station': str, 'service': str, 'dest': str, 'lon': str, 'lat': str, 'source': str, 'track_change': str, 'track': str, 'date': str, 'timeStamp0': datetime.datetime, 'timeStamp1': datetime.datetime})
d.head()
d['timeStamp0'] = pd.to_datetime(d['timeStamp0'], format='%Y-%m-%d %H:%M:%S')
d['timeStamp1'] = pd.to_datetime(d['timeStamp1'], format='%Y-%m-%d %H:%M:%S', errors='coerce')
d.head()
code
322662\cell_3
[ "text_plain_output_1.png" ]
""" def getDeltaTime(x): r=(x[1] - x[0]).total_seconds() return r # It might make sense to add delta_s to the next version d['delta_s']=d[['timeStamp0','timeStamp1']].apply(getDeltaTime, axis=1) """
code
322963\cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
322963\cell_5
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

zika_df = pd.read_csv(os.path.join('..', 'input', 'cdc_zika.csv'), low_memory=False)
keep_rows = pd.notnull(zika_df['report_date'])
zika_df = zika_df[keep_rows]
print('Removed {:d} out of {:d} rows with missing report_date.'.format(len(keep_rows) - sum(keep_rows), len(keep_rows)))
zika_df.index = pd.to_datetime([d.replace('_', '-') for d in zika_df['report_date']], format='%Y-%m-%d')
zika_df.sort_index(inplace=True)
zika_df.index.rename('report_date', inplace=True)
zika_df.drop('report_date', axis=1, inplace=True)
code
322985\cell_3
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn.linear_model as sk

full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.head()
code
322985\cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn.linear_model as sk

full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()
code
322985\cell_5
[ "application_vnd.jupyter.stderr_output_1.png", "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import sklearn.linear_model as sk

full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()

def quarter_binary(df, name, number):
    df[name] = np.where(df['qtr'] == number, 1, 0)
    return df

for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]:
    uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1])
del uncleaned_data['qtr']
uncleaned_data.head()
code
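The quarter_binary loop above hand-rolls one-hot encoding of qtr; pandas ships the same operation as get_dummies. A self-contained sketch on stand-in data (the qt prefix mirrors the column names used above):

import pandas as pd

plays = pd.DataFrame({'qtr': [1, 2, 2, 3, 4, 5]})  # stand-in for uncleaned_data
dummies = pd.get_dummies(plays['qtr'], prefix='qt', prefix_sep='')  # columns qt1 ... qt5
plays = pd.concat([plays.drop('qtr', axis=1), dummies], axis=1)
print(plays.head())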
323155\cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model as sk
from sklearn import preprocessing

full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()

def quarter_binary(df, name, number):
    df[name] = np.where(df['qtr'] == number, 1, 0)
    return df

for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]:
    uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1])
del uncleaned_data['qtr']

# X_all and y_all were never defined in this cell; the lines below are an assumed
# reconstruction based on cell_14 (which applies dropna to the same frame) and the
# otherwise unused preprocessing import.
cleaned_data = uncleaned_data.dropna()
y_all = cleaned_data['Sack']
X_all = preprocessing.scale(cleaned_data.drop('Sack', axis=1))

logreg = sk.LogisticRegressionCV()
logreg.fit(X_all, y_all)
coef_array = np.abs(logreg.coef_)
x = np.arange(1, coef_array.shape[1] + 1, 1)
plt.scatter(x, coef_array, marker='x', color='r')
plt.axhline(0, color='b')
code
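Since LogisticRegressionCV's coef_ comes back as an unlabeled array, pairing it with the feature names makes the scatter plot above easier to read. A sketch with placeholder numbers (the real values would come from logreg.coef_.ravel(); the column order is the assumed X_all order):

import numpy as np

feature_names = ['Drive', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100',
                 'ydstogo', 'ScoreDiff', 'PosTeamScore', 'DefTeamScore',
                 'qt1', 'qt2', 'qt3', 'qt4', 'qt5']
coefs = np.linspace(-0.5, 0.5, len(feature_names))  # placeholder values for illustration only
for name, c in sorted(zip(feature_names, np.abs(coefs)), key=lambda t: -t[1]):
    print('{:>14s}  {:.3f}'.format(name, c))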
323155\cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.linear_model as sk from sklearn import preprocessing full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False) Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass'] Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack'] P_S_data = pd.concat([Pass_Plays, Sack_Plays]) good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo'] good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore'] good_columns += ['Sack'] uncleaned_data = P_S_data[good_columns] uncleaned_data.qtr.unique() def quarter_binary(df, name, number): df[name] = np.where(df['qtr'] == number, 1, 0) return df for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]: uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1]) del uncleaned_data['qtr'] cleaned_data = uncleaned_data.dropna() explanatory_variables = cleaned_data.columns explanatory_variables[1]
code
323155\cell_6
[ "image_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.linear_model as sk from sklearn import preprocessing full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False) Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass'] Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack'] P_S_data = pd.concat([Pass_Plays, Sack_Plays]) good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo'] good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore'] good_columns += ['Sack'] uncleaned_data = P_S_data[good_columns] uncleaned_data.qtr.unique()
code
323155\cell_7
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.linear_model as sk from sklearn import preprocessing full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False) Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass'] Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack'] P_S_data = pd.concat([Pass_Plays, Sack_Plays]) good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo'] good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore'] good_columns += ['Sack'] uncleaned_data = P_S_data[good_columns] uncleaned_data.qtr.unique() def quarter_binary(df, name, number): df[name] = np.where(df['qtr'] == number, 1, 0) return df for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]: uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1]) del uncleaned_data['qtr']
code
323429\cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "text_plain_output_3.png", "text_plain_output_4.png", "text_plain_output_5.png", "text_plain_output_6.png" ]
from subprocess import check_output import sqlite3 import numpy as np import pandas as pd import sqlite3 import nltk import numpy as np from sklearn.feature_extraction.text import CountVectorizer import scipy from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8')) con = sqlite3.connect('../input/database.sqlite') cur = con.cursor() sqlString = ' \n SELECT complaint_id, product, consumer_complaint_narrative, company\n FROM consumer_complaints\n WHERE product = "Mortgage" AND \n consumer_complaint_narrative != ""\n ' cur.execute(sqlString) complaints = cur.fetchall() con.close()
code
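The filter value can also be passed as a bound parameter and the result loaded straight into a DataFrame, which avoids the embedded double quotes in the SQL string above. A sketch against the same table:

import sqlite3
import pandas as pd

con = sqlite3.connect('../input/database.sqlite')
complaints_df = pd.read_sql_query(
    'SELECT complaint_id, product, consumer_complaint_narrative, company '
    'FROM consumer_complaints '
    "WHERE product = ? AND consumer_complaint_narrative != ''",
    con, params=('Mortgage',))
con.close()
print(len(complaints_df))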
323429\cell_4
[ "text_plain_output_1.png" ]
from subprocess import check_output import sqlite3 import numpy as np import pandas as pd import sqlite3 import nltk import numpy as np from sklearn.feature_extraction.text import CountVectorizer import scipy from subprocess import check_output con = sqlite3.connect('../input/database.sqlite') cur = con.cursor() sqlString = ' \n SELECT complaint_id, product, consumer_complaint_narrative, company\n FROM consumer_complaints\n WHERE product = "Mortgage" AND \n consumer_complaint_narrative != ""\n ' cur.execute(sqlString) complaints = cur.fetchall() con.close() complaint_list = [] for i in range(len(complaints)): complaint_list.append(complaints[i][2])
code
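CountVectorizer and nltk are imported above but never used; a likely intended next step is a bag-of-words matrix over the narratives. A self-contained sketch on stand-in documents:

from sklearn.feature_extraction.text import CountVectorizer

complaint_list = ['XXXX charged a late fee in error',
                  'loan modification was denied twice']  # stand-in narratives
vectorizer = CountVectorizer(stop_words='english')
term_matrix = vectorizer.fit_transform(complaint_list)  # sparse docs-by-terms counts
print(term_matrix.shape, len(vectorizer.get_feature_names()))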
324023\cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
324023\cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv')
code
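The stderr output above is most likely pandas' mixed-dtype warning for this wide survey file; reading with low_memory=False (as other kernels on this file do) avoids it by parsing the file in one pass:

import pandas as pd

data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', low_memory=False)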
324023\cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv') data.columns.values
code
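With this many survey columns, substring matching on data.columns is a quick way to locate related fields. 'Income' here is just an example keyword, not a column name taken from the original kernel:

import pandas as pd

data = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', low_memory=False)
income_cols = [c for c in data.columns if 'Income' in c]
print(income_cols)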
324276\cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import matplotlib.pyplot as plt import pandas as pd import colorsys plt.style.use('seaborn-talk') df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', sep=',')
code
324276\cell_12
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title("Job Role Interest") plt.show() N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.EmploymentField.value_counts().index (patches, texts) = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.3, 1)) plt.title('Employment Field') plt.show()
code
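Each pie-chart cell above rebuilds the same evenly spaced HSV palette; the pattern factors naturally into a small helper. A sketch using only colorsys, as the cells above do:

import colorsys

def hsv_palette(n, saturation=0.5, value=0.5):
    """Return n evenly spaced RGB tuples around the hue wheel."""
    return [colorsys.hsv_to_rgb(i / float(n), saturation, value) for i in range(n)]

print(hsv_palette(3))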
324276\cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt df.Age.hist(bins=100) plt.xlabel('Age') plt.title('Distribution of Age') plt.show()
code
324276\cell_6
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) (patches, texts) = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05, 1)) plt.title('Gender') plt.show()
code
324276\cell_9
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] (patches, texts) = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title('Job Role Interest') plt.show()
code
324293\cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import matplotlib.pyplot as plt import pandas as pd import colorsys plt.style.use('seaborn-talk') df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', sep=',')
code
324293\cell_12
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title("Job Role Interest") plt.show() N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.EmploymentField.value_counts().index (patches, texts) = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.3, 1)) plt.title('Employment Field') plt.show()
code
324293\cell_15
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt import pandas as pd labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title("Job Role Interest") plt.show() N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.EmploymentField.value_counts().index patches, texts = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.3, 1)) plt.title("Employment Field") plt.show() df_ageranges = df.copy() bins = [0, 20, 30, 40, 50, 60, 100] df_ageranges['AgeRanges'] = pd.cut(df_ageranges['Age'], bins, labels=['< 20', '20-30', '30-40', '40-50', '50-60', '< 60']) df2 = pd.crosstab(df_ageranges.AgeRanges, df_ageranges.JobPref).apply(lambda r: r / r.sum(), axis=1) N = len(df_ageranges.AgeRanges.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) ax1 = df2.plot(kind='bar', stacked=True, color=RGB_tuples, title='Job preference per Age') (lines, labels) = ax1.get_legend_handles_labels() ax1.legend(lines, labels, bbox_to_anchor=(1.51, 1))
code
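The crosstab(...).apply(lambda r: r / r.sum(), axis=1) idiom above row-normalizes the table so each age band sums to 1; newer pandas versions expose the same thing via the normalize keyword. A sketch on toy data:

import pandas as pd

ages = pd.Series(['20-30', '20-30', '30-40'], name='AgeRanges')
prefs = pd.Series(['freelance', 'startup', 'startup'], name='JobPref')
shares = pd.crosstab(ages, prefs, normalize='index')  # each row sums to 1
print(shares)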
324293\cell_18
[ "image_output_1.png" ]
import colorsys import matplotlib.pyplot as plt import pandas as pd labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title("Job Role Interest") plt.show() N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.EmploymentField.value_counts().index patches, texts = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.3, 1)) plt.title("Employment Field") plt.show() df_ageranges = df.copy() bins=[0, 20, 30, 40, 50, 60, 100] df_ageranges['AgeRanges'] = pd.cut(df_ageranges['Age'], bins, labels=["< 20", "20-30", "30-40", "40-50", "50-60", "< 60"]) df2 = pd.crosstab(df_ageranges.AgeRanges,df_ageranges.JobPref).apply(lambda r: r/r.sum(), axis=1) N = len(df_ageranges.AgeRanges.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) ax1 = df2.plot(kind="bar", stacked=True, color= RGB_tuples, title="Job preference per Age") lines, labels = ax1.get_legend_handles_labels() ax1.legend(lines,labels, bbox_to_anchor=(1.51, 1)) df4 = pd.crosstab(df_ageranges.EmploymentField, df_ageranges.IsUnderEmployed).apply(lambda r: r / r.sum(), axis=1) df4 = df4.sort_values(by=1.0) N = len(df_ageranges.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) ax1 = df4.plot(kind='bar', stacked=True, color=RGB_tuples, title='Under-employed per Employment Field') (lines, labels) = ax1.get_legend_handles_labels() ax1.legend(lines, ['No', 'Yes'], bbox_to_anchor=(1.51, 1))
code
324293\cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt df.Age.hist(bins=100) plt.xlabel('Age') plt.title('Distribution of Age') plt.show()
code
324293\cell_6
[ "image_output_1.png", "text_plain_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) (patches, texts) = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05, 1)) plt.title('Gender') plt.show()
code
324293\cell_9
[ "image_output_1.png", "text_plain_output_1.png" ]
import colorsys import matplotlib.pyplot as plt labels = df.Gender.value_counts().index N = len(df.EmploymentField.value_counts().index) HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.05,1)) plt.title("Gender") plt.show() N = len(df.JobRoleInterest.value_counts().index) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) labels = df.JobRoleInterest.value_counts().index colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray'] (patches, texts) = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90) plt.axes().set_aspect('equal', 'datalim') plt.legend(patches, labels, bbox_to_anchor=(1.25, 1)) plt.title('Job Role Interest') plt.show()
code
324947\cell_10
[ "text_plain_output_1.png" ]
import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for (i, n) in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall()
code
324947\cell_13
[ "text_plain_output_1.png" ]
from collections import Counter import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for (i, n) in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter print('Player Y value: # of instances in database (home players)') Counter(Y)
code
324947\cell_16
[ "text_plain_output_1.png", "text_plain_output_2.png" ]
from collections import Counter import datetime as dt import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for (i, n) in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for (sn, s) in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for (k, v) in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats avg = getTeamScores(999, 'home', EA_stats, printout=True) avg
code
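The getBestDate helper inside getTeamScores above selects the player-stats snapshot dated closest to the match. The core of that nearest-date lookup, in isolation:

import datetime as dt

match_date = dt.datetime(2010, 2, 22)
snapshots = [dt.datetime(2009, 8, 30), dt.datetime(2010, 2, 26), dt.datetime(2011, 2, 22)]
nearest = min(snapshots, key=lambda d: abs(d - match_date))
print(nearest)  # 2010-02-26, the smallest absolute time delta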
324947\cell_17
[ "text_plain_output_1.png", "text_plain_output_2.png" ]
from collections import Counter import datetime as dt import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for (i, n) in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for (sn, s) in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for (k, v) in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats avg = getTeamScores(999, 'home', EA_stats, printout=True) avg avg = getTeamScores(5, 'home', EA_stats, printout=True) avg
code
324947\cell_19
[ "text_plain_output_1.png" ]
from collections import Counter import datetime as dt import numpy as np import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for (i, n) in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for (sn, s) in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for (k, v) in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats all_ids = c.execute('SELECT id FROM Match').fetchall() all_ids = [i[0] for i in sorted(all_ids)] (hF, hM, hD, hG) = ([], [], [], []) (aF, aM, aD, aG) = ([], [], [], []) for i in all_ids: h_stats = getTeamScores(i, 'home', EA_stats, printout=False) hF.append(h_stats['F'][0]) hM.append(h_stats['M'][0]) hD.append(h_stats['D'][0]) hG.append(h_stats['G'][0]) a_stats = getTeamScores(i, 'away', EA_stats, printout=False) aF.append(a_stats['F'][0]) aM.append(a_stats['M'][0]) aD.append(a_stats['D'][0]) aG.append(a_stats['G'][0])
code
324947\cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() df = pd.read_sql(sql='SELECT {} FROM Match'.format('id, country_id, league_id, season, stage, ' + 'date, home_team_api_id, away_team_api_id, ' + 'home_team_goal, away_team_goal'), con=conn) df.head()
code
324947\cell_26
[ "text_html_output_1.png" ]
from collections import Counter import datetime as dt import numpy as np import pandas as pd import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league ids = [i[0] for i in c.execute('SELECT id FROM Country').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM Country').fetchall()] id_country = {i: n for (i, n) in zip(ids, names)} c.execute('PRAGMA TABLE_INFO(Player_Stats)').fetchall() cols = ', '.join(['home_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match'.format(cols)) Y_array = c.fetchall() Y = np.array([a for row in Y_array for a in row]) from collections import Counter Counter(Y) EA_stats = {'player': ', '.join(['overall_rating']), 'goalie': ', '.join(['gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes'])} def getTeamScores(match_id, team, EA_stats, printout=False, group='forward_mid_defense_goalie'): """ Return the cumulative average team scores for a given EA Sports FIFA statistic. If particular EA stats are not in the database that stat is taken as the overall player rating. If any positional stat is unavailable (i.e. no goalie information) that stat is taken as the average of the others for that team. team : str 'home' or 'away' EA_stat : dict Names of statistics to cumulate for goalie and players. e.g. {'player': 'overall_rating, heading_accuracy', 'goalie': 'gk_diving, gk_handling'} printout : boolean Option to print out debug information, defaults to False. group : str How to group scores: 'forward_mid_defense_goalie': output 4 values 'all': output 1 value (currently not implemented) """ if team == 'home': player_cols = ', '.join(['home_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['home_player_Y' + str(i) for i in range(1, 12)]) elif team == 'away': player_cols = ', '.join(['away_player_' + str(i) for i in range(1, 12)]) player_Y_cols = np.array(['away_player_Y' + str(i) for i in range(1, 12)]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_cols, match_id)) player_api_id = np.array(c.fetchall()[0]) if False not in [p == 0 or p == None for p in player_api_id]: return {'F': np.array([np.nan]), 'M': np.array([np.nan]), 'D': np.array([np.nan]), 'G': np.array([np.nan])} empty_mask = player_api_id != np.array(None) player_api_id = player_api_id[empty_mask] player_Y_cols = ', '.join(player_Y_cols[empty_mask]) c.execute('SELECT {0:s} FROM Match WHERE id={1:d}'.format(player_Y_cols, match_id)) player_Y = c.fetchall()[0] def givePosition(Y): """ Input the Y position of the player (as opposed to the lateral X position) and return the categorical position. """ if Y == 1: return 'G' elif Y == 3: return 'D' elif Y == 5 or Y == 6 or Y == 7: return 'M' elif Y == 8 or Y == 9 or Y == 10 or (Y == 11): return 'F' else: return 'NaN' player_pos = np.array([givePosition(Y) for Y in player_Y]) def toDatetime(datetime): """ Convert string date to datetime object. """ return dt.datetime.strptime(datetime, '%Y-%m-%d %H:%M:%S') c.execute('SELECT date FROM Match WHERE id={}'.format(match_id)) match_date = toDatetime(c.fetchall()[0][0]) def getBestDate(player_id, match_date): """ Find most suitable player stats to use based on date of match and return the corresponding row id from the Player_Stats table. 
""" c.execute('SELECT id FROM Player_Stats WHERE player_api_id={}'.format(player_id)) ids = np.array([i[0] for i in c.fetchall()]) c.execute('SELECT date_stat FROM Player_Stats WHERE player_api_id={}'.format(player_id)) dates = [toDatetime(d[0]) for d in c.fetchall()] dates_delta = np.array([abs(d - match_date) for d in dates]) return ids[dates_delta == dates_delta.min()][0] def fill_empty_stats(stats, stat_names): """ Input the incomplete EA player stats and corresponing names, return the filled in stats list. Filling with overall_rating or averaging otherwise (i.e. for goalies where there is no overall_rating stat). """ if not np.sum([s == 0 or s == None for s in stats]): return stats stats_dict = {sn: s for (sn, s) in zip(stat_names, stats)} try: fill = stats_dict['overall_rating'] except: fill = np.mean([s for s in stats if s != 0 and s != None]) filled_stats = [] for s in stats: if s == None or s == 0: filled_stats.append(fill) else: filled_stats.append(s) return filled_stats positions = ('G', 'D', 'M', 'F') average_stats = {} for position in positions: if position == 'G': stats = EA_stats['goalie'] else: stats = EA_stats['player'] position_ids = player_api_id[player_pos == position] average_stats[position] = np.zeros(len(stats.split(','))) for player_id in position_ids: best_date_id = getBestDate(player_id, match_date) c.execute('SELECT {0:s} FROM Player_Stats WHERE id={1:d}'.format(stats, best_date_id)) query = np.array(c.fetchall()[0]) query = fill_empty_stats(query, stats.split(', ')) if sum([q == None or q == 0 for q in query]): raise LookupError('Found null EA stats entry at stat_id={}'.format(best_date_id)) average_stats[position] += query average_stats[position] /= len(position_ids) try: average_stats['G'] = np.array([average_stats['G'].mean()]) except: pass insert_value = np.mean([v[0] for v in average_stats.values() if not np.isnan(v)]) for (k, v) in average_stats.items(): if np.isnan(v[0]): average_stats[k] = np.array([insert_value]) return average_stats df = pd.read_sql(sql='SELECT {} FROM Match'.format('id, country_id, league_id, season, stage, ' + 'date, home_team_api_id, away_team_api_id, ' + 'home_team_goal, away_team_goal'), con=conn) df = df.dropna() H = lambda x: x[0] > x[1] D = lambda x: x[0] == x[1] A = lambda x: x[0] < x[1] (state, result) = ([], []) for goals in df[['home_team_goal', 'away_team_goal']].values: r = np.array([H(goals), D(goals), A(goals)]) state.append(r) if (r == [1, 0, 0]).sum() == 3: result.append(1) elif (r == [0, 1, 0]).sum() == 3: result.append(2) elif (r == [0, 0, 1]).sum() == 3: result.append(3) df['game_state'] = state df['game_result'] = result df['date'] = pd.to_datetime(df['date']) df['country'] = df['country_id'].map(id_country) df['league'] = df['league_id'].map(id_league) f = lambda x: np.mean(x) df['home_mean_stats'] = list(map(f, df[['home_F_stats', 'home_M_stats', 'home_D_stats', 'home_G_stats']].values)) df['away_mean_stats'] = list(map(f, df[['away_F_stats', 'away_M_stats', 'away_D_stats', 'away_G_stats']].values)) df.dtypes
code
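The three H/D/A lambdas plus the boolean-sum comparison above reduce to a sign test on the goal difference. An equivalent sketch on stand-in scores:

import numpy as np

home_goals = np.array([3, 1, 0])
away_goals = np.array([1, 1, 2])
diff = home_goals - away_goals
game_result = np.where(diff > 0, 1, np.where(diff == 0, 2, 3))  # 1=home win, 2=draw, 3=away win
print(game_result)  # [1 2 3]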
324947\cell_7
[ "image_output_1.png" ]
import sqlite3 conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() ids = [i[0] for i in c.execute('SELECT id FROM League').fetchall()] names = [i[0] for i in c.execute('SELECT name FROM League').fetchall()] id_league = {i: n for (i, n) in zip(ids, names)} id_league
code
324967\cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
324967\cell_4
[ "text_plain_output_1.png" ]
import sqlite3 con = sqlite3.connect('../input/database.sqlite') cursor = con.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") print(cursor.fetchall())
code
324967\cell_7
[ "image_output_1.png", "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sqlite3 con = sqlite3.connect('../input/database.sqlite') cursor = con.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") def load(what='NationalNames'): assert what in ('NationalNames', 'StateNames') cols = ['Name', 'Year', 'Gender', 'Count'] if what == 'StateNames': cols.append('State') df = pd.read_sql_query('SELECT {} from {}'.format(','.join(cols), what), con) return df df = load(what='NationalNames') df.query('Name=="Alice"')[['Year', 'Count']].groupby('Year').sum().plot()
code
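The groupby-and-plot pattern above generalizes to several names at once with a pivot table. A sketch on a stand-in frame shaped like the SQL load:

import pandas as pd

df = pd.DataFrame({'Name': ['Alice', 'Alice', 'Bob', 'Bob'],
                   'Year': [1990, 1991, 1990, 1991],
                   'Count': [100, 120, 90, 95]})
by_year = df.pivot_table(index='Year', columns='Name', values='Count', aggfunc='sum')
print(by_year)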
324967\cell_9
[ "image_output_1.png", "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import sqlite3 con = sqlite3.connect('../input/database.sqlite') cursor = con.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") def load(what='NationalNames'): assert what in ('NationalNames', 'StateNames') cols = ['Name', 'Year', 'Gender', 'Count'] if what == 'StateNames': cols.append('State') df = pd.read_sql_query('SELECT {} from {}'.format(','.join(cols), what), con) return df df2 = load(what='StateNames') tmp = df2.groupby(['Year', 'State']).agg({'Count': 'sum'}).reset_index() largest_states = tmp.groupby('State').agg({'Count': 'sum'}).sort_values('Count', ascending=False).index[:5].tolist() tmp.pivot(index='Year', columns='State', values='Count')[largest_states].plot()
code
325017\cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd masterDF = pd.read_csv('../input/emails.csv') messageList = masterDF['message'].tolist() bodyList = [] for message in messageList: firstSplit = message.split('X-FileName: ', 1)[1] secondSplit = firstSplit.split('.') if len(secondSplit) > 1: secondSplit = secondSplit[1] body = ''.join(secondSplit)[4:] bodyList.append(body)
code
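The split-on-'.' heuristic above is fragile: any extra dot in the X-FileName value shifts which chunk gets treated as the body. Python's standard email module parses the headers properly; a sketch on a stand-in message:

import email

raw = 'Message-ID: <1>\nX-FileName: notes.pst\n\nHi team,\nplease see the attached file.'
msg = email.message_from_string(raw)
body = msg.get_payload()  # everything after the blank line that ends the headers
print(body)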
325098\cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "text_plain_output_3.png" ]
from subprocess import check_output import numpy as np import pandas as pd import matplotlib.pyplot as plt from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8')) from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False) df.columns
code
325098\cell_5
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier df = df[df['posteam'] == 'CHI'] df = df[df['DefensiveTeam'] == 'GB'] used_downs = [1, 2, 3] df = df[df['down'].isin(used_downs)] valid_plays = ['Pass', 'Run', 'Sack'] df = df[df['PlayType'].isin(valid_plays)] pass_plays = ['Pass', 'Sack'] df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int') df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']] (X, test) = train_test_split(df, test_size=0.2) y = X.pop('is_pass') rf = RandomForestClassifier(n_estimators=1000) rf.fit(X, y)
code
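Beyond the raw accuracy, RandomForestClassifier exposes feature_importances_, which shows which situational features drive the pass/run prediction. A self-contained sketch on synthetic data (the column names mirror the frame above):

import numpy as np
from sklearn.ensemble import RandomForestClassifier

cols = ['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs']
rng = np.random.RandomState(0)
X = rng.rand(200, len(cols))             # stand-in features
y = (X[:, 0] + X[:, 3] > 1).astype(int)  # stand-in pass/run labels
rf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
for name, imp in sorted(zip(cols, rf.feature_importances_), key=lambda t: -t[1]):
    print('{:>10s} {:.3f}'.format(name, imp))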
325098\cell_7
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier df = df[df['posteam'] == 'CHI'] df = df[df['DefensiveTeam'] == 'GB'] used_downs = [1, 2, 3] df = df[df['down'].isin(used_downs)] valid_plays = ['Pass', 'Run', 'Sack'] df = df[df['PlayType'].isin(valid_plays)] pass_plays = ['Pass', 'Sack'] df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int') df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']] (X, test) = train_test_split(df, test_size=0.2) y = X.pop('is_pass') rf = RandomForestClassifier(n_estimators=1000) rf.fit(X, y) test_y = test.pop('is_pass') rf.score(test, test_y)
code
325101\cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "text_plain_output_3.png" ]
from subprocess import check_output import numpy as np import pandas as pd import matplotlib.pyplot as plt from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8')) from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.svm import SVC df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False) df.columns
code
325101\cell_4
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier from sklearn.svm import SVC """ Boiler-Plate/Feature-Engineering to get frame into a testable format """ used_downs = [1, 2, 3] df = df[df['down'].isin(used_downs)] valid_plays = ['Pass', 'Run', 'Sack'] df = df[df['PlayType'].isin(valid_plays)] pass_plays = ['Pass', 'Sack'] df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int') df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']] (X, test) = train_test_split(df, test_size=0.2) y = X.pop('is_pass') test_y = test.pop('is_pass') rf = RandomForestClassifier(n_estimators=10) gb = GradientBoostingClassifier(n_estimators=10) sv = SVC() rf.fit(X, y) gb.fit(X, y) sv.fit(X, y)
code
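Cells 5 and 6 below score the random forest and gradient boosting one at a time; the same held-out comparison loops cleanly over all three fitted models. A self-contained sketch on synthetic data:

import numpy as np
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X, y = rng.rand(100, 5), rng.randint(0, 2, 100)  # stand-in play data
X_tr, X_te, y_tr, y_te = X[:80], X[80:], y[:80], y[80:]
for name, model in [('rf', RandomForestClassifier(n_estimators=10)),
                    ('gb', GradientBoostingClassifier(n_estimators=10)),
                    ('svc', SVC())]:
    model.fit(X_tr, y_tr)
    print(name, round(model.score(X_te, y_te), 3))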
325101\cell_5
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier from sklearn.svm import SVC """ Boiler-Plate/Feature-Engineering to get frame into a testable format """ used_downs = [1, 2, 3] df = df[df['down'].isin(used_downs)] valid_plays = ['Pass', 'Run', 'Sack'] df = df[df['PlayType'].isin(valid_plays)] pass_plays = ['Pass', 'Sack'] df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int') df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']] (X, test) = train_test_split(df, test_size=0.2) y = X.pop('is_pass') test_y = test.pop('is_pass') rf = RandomForestClassifier(n_estimators=10) gb = GradientBoostingClassifier(n_estimators=10) sv = SVC() rf.fit(X, y) gb.fit(X, y) sv.fit(X, y) rf.score(test, test_y)
code
325101\cell_6
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import train_test_split from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier from sklearn.svm import SVC """ Boiler-Plate/Feature-Engineering to get frame into a testable format """ used_downs = [1, 2, 3] df = df[df['down'].isin(used_downs)] valid_plays = ['Pass', 'Run', 'Sack'] df = df[df['PlayType'].isin(valid_plays)] pass_plays = ['Pass', 'Sack'] df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int') df = df[['down', 'yrdline100', 'ScoreDiff', 'ydstogo', 'TimeSecs', 'is_pass']] (X, test) = train_test_split(df, test_size=0.2) y = X.pop('is_pass') test_y = test.pop('is_pass') rf = RandomForestClassifier(n_estimators=10) gb = GradientBoostingClassifier(n_estimators=10) sv = SVC() rf.fit(X, y) gb.fit(X, y) sv.fit(X, y) gb.score(test, test_y)
code