seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2614278018
|
# topics = ['Dynamic Programming']
from typing import List
class Solution:
def maxUncrossedLines(self, nums1: List[int], nums2: List[int]) -> int:
"""
Dynamic Programming
time O(mn), space O(mn), where m and n are the lengths of nums1 and nums2 respectively
"""
m, n = len(nums1), len(nums2)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i, num1 in enumerate(nums1):
for j, num2 in enumerate(nums2):
if num1 == num2:
dp[i + 1][j + 1] = dp[i][j] + 1
else:
dp[i + 1][j + 1] = max(dp[i][j + 1], dp[i + 1][j])
return dp[m][n]
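# Illustrative check (added; not part of the original solution). The pair below is the
# LeetCode 1035 sample input, for which the expected answer is 2.
assert Solution().maxUncrossedLines([1, 4, 2], [1, 2, 4]) == 2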
|
show-me-code/signInHelper-using-face-
|
algorithms/[1035]不相交的线/solution.py
|
solution.py
|
py
| 655 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
}
] |
10502969372
|
from rest_framework.test import APITestCase
from ...services.notification_service import NotificationService
from ...api_services.notification import NotificationAPIService
from django.urls import reverse
class TestNotificationAPIServices(APITestCase):
def setUp(self):
self.payload = {
"title": "Winter discount sale started",
"description": "Enter coupon-code to get flat 10% discount"
}
self.notification_id = NotificationService().create(self.payload).id
self.url = reverse("notification")
def test_create_notification(self):
data = {
"title": "Summer discount sale started",
"description": "Enter coupon-code to get flat 10% discount"
}
notification = NotificationAPIService().create_notification(data)
self.assertEqual(notification.get('title'), data.get('title'))
def test_get_all_notification(self):
notifications = NotificationAPIService().get_all_notification()
self.assertNotEqual(len(notifications), 0)
def test_get_notification_by_id(self):
notification = NotificationAPIService().get_notification_by_id(self.notification_id)
self.assertEqual(notification.get('id'), self.notification_id)
def test_delete_notification_by_id(self):
response = NotificationAPIService().delete_notification(self.notification_id)
self.assertEqual(response, True)
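# Illustrative (added): these unit tests target Django's test runner; the dotted module
# path below is an assumption inferred from the file path, not taken from the repository docs.
#   python manage.py test push_notification.apps.notification.tests.unit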
|
anojkr/onboarding-project
|
push_notification/apps/notification/tests/unit/test_notification_api_services.py
|
test_notification_api_services.py
|
py
| 1,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.test.APITestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "services.notification_service.NotificationService",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "api_services.notification.NotificationAPIService",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "api_services.notification.NotificationAPIService",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "api_services.notification.NotificationAPIService",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "api_services.notification.NotificationAPIService",
"line_number": 33,
"usage_type": "call"
}
] |
7295749579
|
import discord, datetime, time
from discord.ext import commands, tasks
from itertools import cycle
start_time = time.time()
class Events(commands.Cog):
def __init__(self, client):
self.client = client
self.status = cycle(['Bind', 'Haven', 'Ascent','Split','Fracture'])
@tasks.loop(seconds=10.0)
async def change_status(self):
await self.client.change_presence(activity=discord.Game(next(self.status)))
@commands.Cog.listener()
async def on_ready(self):
await self.client.wait_until_ready()
self.change_status.start()
print("Event Commands Are Ready!")
@commands.command(pass_context=True)
async def uptime(self, ctx):
current_time = time.time()
difference = int(round(current_time - start_time))
text = str(datetime.timedelta(seconds=difference))
await ctx.send(f"Uptime: {text}")  # the original computed the uptime string but never sent a reply
def setup(bot):
bot.add_cog(Events(bot))
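# Illustrative loading sketch (added; assumes a discord.py 1.x-style bot, consistent with the
# synchronous setup() entry point above). Prefix and token are placeholders.
# bot = commands.Bot(command_prefix='!')
# bot.load_extension('cogs.Events')
# bot.run('YOUR_BOT_TOKEN')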
|
awesomedustyn/CAP-N-
|
cogs/Events.py
|
Events.py
|
py
| 908 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "itertools.cycle",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "discord.Game",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks.loop",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 23,
"usage_type": "name"
}
] |
8733632634
|
from hmmlearn import hmm
import numpy as np
class CustomHMM:
def __init__(self):
def build_hmm():
model = hmm.GMMHMM(n_components=3, n_mix=3, covariance_type="diag", init_params="t")
model.transmat_ = np.array([[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]])
return model
self.hmm_0 = build_hmm()
self.hmm_1 = build_hmm()
def fit(self, X_train, y_train):
# X_train shape(n_instances, n_samples)
labels = set(y_train)
if len(labels) != 2:
raise Exception("y_train doesn't contain 2 classes")
X_0 = X_train[y_train == 0, :]
X_1 = X_train[y_train == 1, :]
self.hmm_0.fit(X_0)
self.hmm_1.fit(X_1)
def predict(self, X_test):
res = []
for x in X_test:
x = np.reshape(x,[1,len(x)])
res.append(0 if self.hmm_0.score(x) > self.hmm_1.score(x) else 1)
return np.array(res)
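# Illustrative usage sketch (added; shapes and data are assumptions, not from the repo).
# X has one row per instance and one column per sample, matching the fit() comment above.
# X = np.random.randn(20, 50)
# y = np.array([0] * 10 + [1] * 10)
# clf = CustomHMM()
# clf.fit(X, y)
# preds = clf.predict(X)  # array of 0/1 labels, one per row of X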
|
mattschaff/upennvoicerecog
|
CustomHMM.py
|
CustomHMM.py
|
py
| 986 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "hmmlearn.hmm.GMMHMM",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "hmmlearn.hmm",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
}
] |
41655800194
|
import dash
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import base64
import pandas as pd
import io
import dash_table
import os
import ast
from seaborn.matrix import heatmap
from .layouts.layout import html_layout
from dash.exceptions import PreventUpdate
import requests
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
import base64
from flask_sqlalchemy import SQLAlchemy
import psycopg2
from .layouts.page2_layout import server_layout
from .layouts.page3_layout import layout3
from .modelHelperFunctions import *
# from .layouts.EDA_layout import EDA_layout
# from .EDA_callbacks import EDA_callback
## size convert
import enum
# Enum for size units
df = pd.DataFrame([])
file = open('data/var_info.txt', 'r')
contents = file.read()
dictionary = ast.literal_eval(contents)
file.close()
file1 = open('data/section_name.txt', 'r')
contents1 = file1.read()
dictionary_name = ast.literal_eval(contents1)
file1.close()
file2 = open('data/categorized_type.txt', 'r')
contents2 = file2.read()
categories = ast.literal_eval(contents2)
file2.close()
def load_info_dict(file):
f = open(file, 'r')
cont = f.read()
f.close()
return ast.literal_eval(cont)
VAR_PATH = 'data/var_info.txt'
STATE_PATH = 'data/state_info.txt'
SECTION_PATH = 'data/section_name.txt'
REGRESSON_LIST = ["Linear", "Lasso", "Ridge",
"LassoLars", "Bayesian Ridge", "Elastic Net"]
REG_CRITERION = ['Index', 'Label', 'Model', 'Penalty', 'MAE', 'MSE', 'R2']  # last entry was blank; the regression table data includes an R2 value
CLASSIFICATION_LIST = ["Logistic", "LDA"]
#CLF_CRITERION = ["Index", "Label", "Model", "Penalty", "Accuracy", "ROC_AUC score", "Precision", "Recall", "F1-Score"]
CLF_CRITERION = ["Index", "Label", "Model", "Penalty",
"Accuracy"]
var_info = load_info_dict(VAR_PATH)
section_info = load_info_dict(SECTION_PATH)
state_info = load_info_dict(STATE_PATH)
SECTION = list(section_info.keys())
STATE = list(state_info.keys())
class SIZE_UNIT(enum.Enum):
BYTES = 1
KB = 2
MB = 3
GB = 4
def convert_unit(size_in_bytes, unit):
""" Convert the size from bytes to other units like KB, MB or GB"""
if unit == SIZE_UNIT.KB:
return size_in_bytes/1024
elif unit == SIZE_UNIT.MB:
return size_in_bytes/(1024*1024)
elif unit == SIZE_UNIT.GB:
return size_in_bytes/(1024*1024*1024)
else:
return size_in_bytes
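# Quick sanity check (added; illustrative only): 1 MiB expressed in megabytes.
assert convert_unit(1048576, SIZE_UNIT.MB) == 1.0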
def download_file_from_google_drive(id, destination):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
all_performance_layouts = []
history_html = None
def dataDownload(server):
global df
app = dash.Dash(server=server,
routes_pathname_prefix='/dashapp/',
external_stylesheets=[
'https://codepen.io/dapraxis/pen/gOPGzPj.css',
'/static/dist/css/styles.css',
'https://fonts.googleapis.com/css?family=Lato',
'https://codepen.io/chriddyp/pen/bWLwgP.css'
])
allData = {'BRFSS':'1tNWPT9xW1jc3Qta_h4CGHp9lRbHM1540'}
app.index_string = html_layout
db = SQLAlchemy(server)
app.scripts.config.serve_locally = True # Uploaded to npm, this can work online now too.
df = pd.DataFrame([])
# df.to_csv(os.stat(r`str(os.getcwd())+'\\uploads\\'+str('download.csv')`))
org_layout = html.Div([
html.Div([], id='hidden-div', style={'display': 'none'}),
dcc.Dropdown(
id='demo-dropdown',
options=[
{'label': 'BRFSS', 'value': 'BRFSS'}
],
searchable=False,
placeholder="Select A Medical Dataset",
style = {
'width':'50%',
'margin':'2% 0%'
}
),
html.Div(id='dd-output-container'),
# html.Div(id='output')
dcc.Loading(
children=[
html.Div(id='output')
], type='cube')
],style={
'width': '70%',
'margin': '5% 15%',
# 'text-align': 'center',
})
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
# @app.callback(Input('test', 'n_clicks'))
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/dashapp/':
return org_layout
elif pathname == '/dashapp/EDA/':
# create_EDA(app, df)
return server_layout
elif pathname == '/dashapp/RFA/':
return layout3
# return '404'
@app.callback(
dash.dependencies.Output('dd-output-container', 'children'),
[dash.dependencies.Input('demo-dropdown', 'value')])
def update_output(value):
if(not value):
raise PreventUpdate
return 'You have selected "{}"'.format(value)
@app.callback(Output('output', 'children'),
[Input('demo-dropdown', 'value')])
def update_output(value):
global df
if value is not None:
# file_id = allData[value]
# destination = 'download.csv'
# download_file_from_google_drive(file_id, destination)
# connection = False
if value =='BRFSS':
df = pd.read_sql_query(sql = 'SELECT * FROM brfss2 LIMIT 100', con=db.engine, index_col='object_id')
df.columns = df.columns.str.upper()
return parse_contents(value)
def parse_contents(filename):
global df
'''
profile = ProfileReport(df, title = "Pandas Profiling Report")
profile.to_file("report.html") #change it so that it automatically saves it in the folder where this file is
These lines create the report using pandas profiling, however it takes quite long as the report is 11 MB. For now,
only one dataset is used so these lines don't neeed to run each time
'''
new_df = df.iloc[:5, :5]
return html.Div([
# html.H5("Upload File: {}".format(filename)),
html.Hr(),
html.A(html.Button('Next', id='btn'), href='/dashapp/EDA/'),
html.Hr(),
dcc.Loading(children=[
dash_table.DataTable(
id='database-table',
columns=[{'name': i, 'id': i} for i in new_df.columns],
data=new_df.to_dict('records'),
sort_action="native",
sort_mode='native',
page_size=300,
fixed_rows = 100,
style_table={
'maxHeight': '80ex',
#'overflowY': 'scroll',
#'overflowX': 'scroll',
'width': '100%',
'border' : '1px solid blue'
},
style_cell={
'color': 'blue'
},
),
html.Hr(), # horizontal line
#html.A(html.Button('Next', id='btn'), href='/dashapp/EDA/')
], type='cube')
])
## EDA STARTS
@app.callback(dash.dependencies.Output('dropdown_content', 'children'),
[dash.dependencies.Input('dropdown_section_name', 'value')])
def render_tab_preparation_multiple_dropdown(value):
#print (dictionary_name)
all_vals = []
for i in dictionary_name.values():
for j in i:
all_vals.append(j)
if value:
for key in dictionary_name:
if key == value:
return_div = html.Div([
html.Br(),
dcc.Dropdown(
id='dropdown',
options=[
{'label': i, 'value': i} for i in dictionary_name[key]
],
placeholder="Select Feature",
# value='features'
),
#dcc.Dropdown(
# id = 'dropdown2',
# options = [
# {'label': i, 'value' : i} for i in all_vals
# ],
# placeholder="Select Second Feature",
#),
html.Div(id='single_commands'),
])
return return_div
else:
raise PreventUpdate
'''
@app.callback(dash.dependencies.Output('dropdown_content2', 'children'),
[dash.dependencies.Input('dropdown_section_name2', 'value')])
def render_tab_preparation_multiple_dropdown2(value2):
all_vals = []
for i in dictionary_name.values():
for j in i:
all_vals.append(j)
return_div = html.Div([
html.Br(),
dcc.Dropdown(
id = 'dropdown2',
options = [
{'label': i, 'value' : i} for i in all_vals
],
placeholder="Select Second Feature",
),
html.Div(id='single_commands2'),
])
return return_div
'''
@app.callback(
dash.dependencies.Output('dd-notice', 'children'),
[dash.dependencies.Input('dropdown', 'value'),])
def update_selected_feature_div(value):
if value:
result = []
for key, values in dictionary[value].items():
result.append('{}:{}'.format(key, values))
div = html.Div([
html.Div([
html.H3('Feature Information')
]),
html.Div([
html.Ul([html.Li(x) for x in result])
]),
])
return div
else:
raise PreventUpdate
@app.callback(
[dash.dependencies.Output('dd-output-container2', 'children'),
dash.dependencies.Output('graph_plot', 'children')],
[dash.dependencies.Input('dropdown', 'value')])
def preparation_tab_information_report(value):
global df
if value:
str_value = str(value)
df = pd.read_sql_query(sql = 'SELECT {}, object_id FROM brfss2 LIMIT 100'.format(str_value.lower()), con=db.engine, index_col='object_id')
R_dict = df.describe().to_dict()
result = []
for key in R_dict[str_value.lower()]:
result.append('{}: {}'.format(key, R_dict[str_value.lower()][key]))
div = html.Div([
html.Div([
html.H3('Feature Statistics')
]),
html.Div([
html.Ul([html.Li(x) for x in result])
# dcc.Graph(figure= px.box(df[str_value.lower()], points="all"))
]),
])
g = dcc.Loading(id='graph_loading', children=[
dcc.Graph(
figure={"layout": {
"xaxis": {"visible": False},
"yaxis": {"visible": False},
"annotations": [{
"text": "Please Select the Feature you would like to Visualize",
"xref": "paper",
"yref": "paper",
"showarrow": False,
"font": {"size": 28}
}]
}}, id='dd-figure'),
])
return [div, g]
else:
raise PreventUpdate
# Define a function for drawing box plot for selected feature
@app.callback(
dash.dependencies.Output('dd-figure', 'figure'),
# [dash.dependencies.Input('dropdown', 'value'),dash.dependencies.Input("hidden-div", 'children')])
[dash.dependencies.Input('dropdown', 'value')])
def preparation_tab_visualize_features(value):
global df
if value:
str_value = str(value).lower()
df = pd.read_sql_query(sql = 'SELECT {}, object_id FROM brfss2 LIMIT 100'.format(str_value.lower()), con=db.engine, index_col='object_id')
integers = categories[0]
floats = categories[1]
if str_value in integers:
fig = px.histogram(df[str_value], y=str_value)
elif str_value in floats:
fig = px.box(df[str_value], y=str_value)
else:
fig = px.histogram(df[str_value], y=str_value)
return fig
else:
raise PreventUpdate
@app.callback(Output(component_id='feature_dropdown', component_property='options'),
[Input(component_id='section_dropdown', component_property='value')])
def update_feature_dropdown(section):
if section == None:
return dash.no_update
lst = section_info.get(section)
return [{'label': '{}: {}'.format(i, var_info.get(i).get('Label')), 'value': i} for i in lst]
@app.callback(Output(component_id='model_dropdown', component_property='options'),
[Input(component_id='type_dropdown', component_property='value')])
def update_model_dropdown(task):
if task == "Regression":
return [{'label': i, 'value': i} for i in REGRESSON_LIST]
elif task == "Classification":
return [{'label': i, 'value': i} for i in CLASSIFICATION_LIST]
else:
return dash.no_update
@app.callback(Output('slider-output-container', 'children'), [Input('num-of-factors', 'value')])
def update_output(value):
return 'You have selected {} top risk factors'.format(value)
@app.callback(Output('penalty-output-container', 'children'), [Input('penalty', 'value')])
def update_output(value):
return 'You have selected {} as penalty multiplier'.format(value)
@app.callback([Output('RFA_output', 'children'),
Output('reg_rec', 'data'),
Output('clf_rec', 'data')],
[Input('run_button', 'n_clicks')],
[State('state_dropdown', 'value'),
State('feature_dropdown', 'value'),
State('type_dropdown', 'value'),
State('model_dropdown', 'value'),
State('penalty', 'value'),
State('num-of-factors', 'value'),
State('reg_rec', 'data'),
State('clf_rec', 'data')])
def perform_risk_factor_analysis(n_clicks, state, label, task_type, model_type, penalty, num_of_factor, reg_data, clf_data):
global df
global history_html
if n_clicks > 0:
if((label == None) or (task_type == None) or (model_type == None)):
# return [], reg_data, clf_data, True
return dash.no_update, dash.no_update, dash.no_update
state_df = pd.read_sql_query(sql = 'SELECT * FROM brfss2 WHERE _state = {}'.format(int(state)), con=db.engine, index_col='object_id')
state_df.columns = state_df.columns.str.upper()
y = state_df[label]
X = state_df.drop([label], axis=1)
col_names = X.columns
if task_type == "Regression":
model_res = regression_models(
X, y, model_type, True, alpha=penalty)
model = model_res[0]
res = reg_risk_factor_analysis(model, col_names, num_of_factor)
performance_layout = html.Div(
html.Div(
dash_table.DataTable(
id="reg_table",
columns=[{'name': val, 'id': val}
for val in REG_CRITERION[1:]],
data=[{"Label": label, 'Model': model_type, 'Penalty': penalty, 'MAE': round(model_res[1], 5), 'MSE':round(model_res[2], 5),
'R2':round(model_res[3], 5)}],
style_cell={
'height': 'auto',
'textAlign': 'right'
# all three widths are needed
# 'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
# 'whiteSpace': 'normal'
},
)
),
)
info = "Perform Risk Factor Analysis with normalized data based on {} regression".format(
model_type)
elif task_type == "Classification":
model_res = classification_models(
X, y, model_type, True, C=penalty)
model = model_res[0]
cfn_matrix = model_res[5]
#heatmap_filename = model_res[-1]
#encoded_image = base64.b64encode(open(heatmap_filename, 'rb').read())
res = clf_risk_factor_analysis(model, col_names, num_of_factor, label)[0]
table_columns = []
categories = clf_risk_factor_analysis(model, col_names, num_of_factor, label)[1]
table_columns.append(categories) #categories (yes, no, etc.)
table_columns.append(model_res[2]) #precision
table_columns.append(model_res[3]) #recall
table_columns.append(model_res[4]) #f1
accuracy_column = [model_res[1]] * len(table_columns[1])
table_columns.append(accuracy_column)
#print (accuracy_column)
CLF_CRITERION2 = ["Category", "Precision", "Recall", "F1", "Accuracy"]
#print (table_columns)
if len(table_columns[0]) == len(table_columns[1]) + 1:
table_columns[0] = table_columns[0][:-1]
elif len(table_columns[0]) == len(table_columns[1]) + 2:
table_columns[0] = table_columns[0][:-2]
elif len(table_columns[0]) == len(table_columns[1]) - 1:
i = 1
while (i <= 4):
table_columns[i] = table_columns[i][:-1]
i += 1
table_dict = {"Category" : table_columns[0], "Precision" : table_columns[1], "Recall" : table_columns[2], "F1": table_columns[3], "Accuracy" : table_columns[4]}
table_df = pd.DataFrame(table_dict)
#print (table_df)
performance_layout = html.Div(
#html.Div(
# dash_table.DataTable(
# id="clf_table",
# columns=[{'name': val, 'id': val}
# for val in CLF_CRITERION[1:]],
# #data=[{"Label": label, 'Model': model_type, "Penalty": penalty, "Accuracy": round(model_res[1], 5),
# # "Precision":round(model_res[2], 5), "Recall":round(model_res[3], 5), "F1-Score":round(model_res[4], 5)}],
# data=[{"Label": label, 'Model': model_type, "Penalty": penalty, "Accuracy": model_res[1]}],
# style_cell={
# 'height': 'auto',
# 'textAlign': 'right'
# # all three widths are needed
# # 'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
# # 'whiteSpace': 'normal'
# }
# )
#),
html.Div(
dash_table.DataTable(
id="clf_table",
columns = [{'name' : val, 'id' : val}
for val in CLF_CRITERION2],
data = table_df.to_dict('records'),
style_cell = {
# 'height' : 'auto',
# 'textAllign' : 'right',
}
)
)
)
all_performance_layouts.append(performance_layout)
info = "Perform Risk Factor Analysis with normalized data based on {} model".format(
model_type)
else:
return [], reg_data, clf_data
res_tab_col = ["Rank", "Factor", "Absolute Weight", "Sign"]
#res = reg_risk_factor_analysis(model, col_names, num_of_factor)
def return_history(history_html):
history_all = html.Div([
html.Details([
html.Summary("Performance of History Models"),
html.Details([
html.Summary("Performance Records for Classification Model"),
html.Div(
dash_table.DataTable(
id="clf_rec",
columns=[{'name': val, 'id': val}
for val in CLF_CRITERION],
data=[],
style_cell={
'height': 'auto',
'textAlign': 'right'
# all three widths are needed
# 'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
# 'minWidth': '100px', 'width': '120px', 'maxWidth': '240px',
# 'whiteSpace': 'normal'
}
)
),
#html.Details([
#html.Summary("Performance Table"),
#performance_layout])
history_html
], style={'paddingLeft':'25px'}),
html.Details([
html.Summary("Performance Records for Regression Model"),
html.Div(
dash_table.DataTable(
id="reg_rec",
columns=[{'name': val, 'id': val}
for val in REG_CRITERION],
data=[],
style_cell={
'height': 'auto',
'textAlign': 'right'
# all three widths are needed
# 'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
# 'minWidth': '100px', 'width': '120px', 'maxWidth': '240px',
# 'whiteSpace': 'normal'
}
)
),
], style={'paddingLeft':'25px'})
])
])
return history_all
if task_type == "Classification":
history_html = html.Div(children=[], style={'paddingLeft':'25px'})
for i in range(len(all_performance_layouts)):
text = "Performance table for Index " + str(i+1)
temp_html = html.Details([
html.Summary(text),
all_performance_layouts[i]])
history_html.children.append(temp_html)
######## so history_html is a list with all the temp_htmls, and they need to be combined via comments like
'''
html.Details([
html.Summray(text),
all_performance_layouts[0])],
html.Details([
html.Summary(text),
all_performance_layouts[1])],
and so on
'''
#print (cfn_matrix)
#print (categories)
if len(categories) == len(cfn_matrix) - 1:
categories.append('Refused ')
elif len(categories) == len(cfn_matrix) + 1:
categories = categories[:-1]
#print (categories)
heatmap_fig = ff.create_annotated_heatmap(cfn_matrix.T, x = categories, y = categories)
layout = html.Div(children=[
html.P(
html.Label(info)
),
html.Div(
dash_table.DataTable(
id="RFA_table",
columns=[
{'name': i, 'id': i} for i in res_tab_col
],
data=res,
style_cell={
'height': 'auto',
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
'whiteSpace': 'normal',
'textAlign': 'right'
},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_data_conditional=[
{
'if': {
'column_id': 'Sign',
'filter_query': '{Sign} = "-"'
},
'backgroundColor': 'dodgerblue',
'color': 'white'
},
{
'if': {
'column_id': 'Sign',
'filter_query': '{Sign} = "+"'
},
'backgroundColor': '#85144b',
'color': 'white'
},
],
)
),
html.P(
html.Label("{} model performance: ".format(model_type))
),
performance_layout,
#html.Div([
# html.Label("Heatmap for the Confusion Matrix:"),
# html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode())) #encoded_image does not exist for continous variables
#]),
html.Div([
dcc.Graph(figure=heatmap_fig)
]),
return_history(history_html)
])
elif task_type == "Regression":
layout = html.Div(children=[
html.P(
html.Label(info)
),
html.Div(
dash_table.DataTable(
id="RFA_table",
columns=[
{'name': i, 'id': i} for i in res_tab_col
],
data=res,
style_cell={
'height': 'auto',
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
'whiteSpace': 'normal',
'textAlign': 'right'
},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_data_conditional=[
{
'if': {
'column_id': 'Sign',
'filter_query': '{Sign} = "-"'
},
'backgroundColor': 'dodgerblue',
'color': 'white'
},
{
'if': {
'column_id': 'Sign',
'filter_query': '{Sign} = "+"'
},
'backgroundColor': '#85144b',
'color': 'white'
},
],
)
),
html.P(
html.Label("{} model performance: ".format(model_type))
),
performance_layout,
return_history(history_html)
# html.Div([
# html.Details([
# html.Summary("Performance of History Models"),
# html.Details([
# html.Summary("Performance Records for Regression Model"),
# html.Div(
# dash_table.DataTable(
# id="reg_rec",
# columns=[{'name': val, 'id': val}
# for val in REG_CRITERION],
# data=[],
# style_cell={
# 'height': 'auto',
# 'textAlign': 'right'
# # all three widths are needed
# # 'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
# # 'minWidth': '100px', 'width': '120px', 'maxWidth': '240px',
# # 'whiteSpace': 'normal'
# }
# )
# ),
# ]),
# html.Details([
# html.Summary("Performance Records for Classification Model"),
# html.Div([
# dash_table.DataTable(
# id="clf_rec",
# columns=[{'name': val, 'id': val}
# for val in CLF_CRITERION],
# data=[],
# style_cell={
# 'height': 'auto',
# 'textAlign': 'right'
# # all three widths are needed
# # 'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
# # 'minWidth': '100px', 'width': '120px', 'maxWidth': '240px',
# # 'whiteSpace': 'normal'
# }
# ),history_html]
# ),
# ])
# ])
# ])
])
if task_type == "Regression":
return layout, reg_data + [{"Index": len(reg_data)+1, "Label": label, 'Model': model_type, 'Penalty': penalty, 'MAE': round(model_res[1], 5), 'MSE':round(model_res[2], 5),
}], clf_data
elif task_type == "Classification":
return layout, reg_data, clf_data + [{"Index": len(clf_data)+1, "Label": label, 'Model': model_type, "Penalty": penalty, "Accuracy": model_res[1]}]
else:
return [], reg_data, clf_data
else:
# return [], reg_data, clf_data, False
return dash.no_update, dash.no_update, dash.no_update
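# Illustrative wiring sketch (added; the surrounding Flask app factory is not shown in this
# file, so the names below are assumptions). dataDownload() attaches the Dash app to the
# Flask server at /dashapp/; SQLAlchemy(server) also expects SQLALCHEMY_DATABASE_URI to be set.
# from flask import Flask
# server = Flask(__name__)
# server.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://user:pass@host:5432/dbname'
# dataDownload(server)
# server.run(debug=True)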
|
DaPraxis/Interactive-Machine-Learning-Tool-for-Risk-Factor-Analysis2
|
application/plotlydash/dataDownload.py
|
dataDownload.py
|
py
| 34,472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "requests.Session",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "dash.Dash",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "layouts.layout.html_layout",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Loading",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Location",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "layouts.page2_layout.server_layout",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "layouts.page3_layout.layout3",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "dash.exceptions.PreventUpdate",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Hr",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "dash_html_components.A",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Button",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Hr",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Loading",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Hr",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Br",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Dropdown",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "dash.exceptions.PreventUpdate",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "dash_html_components.Div",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H3",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Ul",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Li",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "dash.exceptions.PreventUpdate",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "dash_html_components.H3",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Ul",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Li",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Loading",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "dash.exceptions.PreventUpdate",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 338,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "plotly.express.histogram",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 391,
"usage_type": "name"
},
{
"api_name": "plotly.express.box",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 393,
"usage_type": "name"
},
{
"api_name": "plotly.express.histogram",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 395,
"usage_type": "name"
},
{
"api_name": "dash.exceptions.PreventUpdate",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "dash.dependencies",
"line_number": 382,
"usage_type": "attribute"
},
{
"api_name": "dash.no_update",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "dash.no_update",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 409,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 422,
"usage_type": "call"
},
{
"api_name": "dash.no_update",
"line_number": 446,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 509,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Details",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Summary",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Details",
"line_number": 555,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Summary",
"line_number": 556,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Details",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Summary",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Details",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Summary",
"line_number": 606,
"usage_type": "call"
},
{
"api_name": "plotly.figure_factory.create_annotated_heatmap",
"line_number": 626,
"usage_type": "call"
},
{
"api_name": "plotly.figure_factory",
"line_number": 626,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div",
"line_number": 627,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 629,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 632,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 669,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 670,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "dash_core_components.Graph",
"line_number": 678,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 683,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 684,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 685,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Div",
"line_number": 687,
"usage_type": "call"
},
{
"api_name": "dash_table.DataTable",
"line_number": 688,
"usage_type": "call"
},
{
"api_name": "dash_html_components.P",
"line_number": 725,
"usage_type": "call"
},
{
"api_name": "dash_html_components.Label",
"line_number": 726,
"usage_type": "call"
},
{
"api_name": "dash.no_update",
"line_number": 784,
"usage_type": "attribute"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.State",
"line_number": 437,
"usage_type": "call"
}
] |
71452829947
|
import numpy as np
from functools import partial
#from multiprocessing import Pool
import matplotlib.pyplot as plt
from matplotlib import cm
from pandas import DataFrame
import scipy as sc
import scipy.signal
import os
import pdb
import time
def group_into_bands(fft, fft_freq, nfreq_bands):
if nfreq_bands == 178:
bands = range(1, 180)
elif nfreq_bands == 4:
bands = [0.1, 4, 8, 12, 30]
elif nfreq_bands == 6:
bands = [0.1, 4, 8, 12, 30, 70, 180]
# http://onlinelibrary.wiley.com/doi/10.1111/j.1528-1167.2011.03138.x/pdf
elif nfreq_bands == 8:
bands = [0.1, 4, 8, 12, 30, 50, 70, 100, 180]
elif nfreq_bands == 12:
bands = [0.5, 4, 8, 12, 30, 40, 50, 60, 70, 85, 100, 140, 180]
elif nfreq_bands == 9:
bands = [0.1, 4, 8, 12, 21, 30, 50, 70, 100, 180]
else:
raise ValueError('wrong number of frequency bands')
freq_bands = np.digitize(fft_freq, bands)
#print(freq_bands)
#st = time.clock()
df = DataFrame({'fft': fft, 'band': freq_bands})
#end = time.clock()
#print("DF: " + str(end-st))
#st = time.clock()
df = df.groupby('band').mean()
#end = time.clock()
#print("DFM: " + str(end-st))
return df.fft[1:-1]
def fgroup_into_bands(fft, fft_freq, nfreq_bands):
if nfreq_bands == 178:
bands = range(1, 180)
elif nfreq_bands == 4:
bands = [0.1, 4, 8, 12, 30]
elif nfreq_bands == 6:
bands = [0.1, 4, 8, 12, 30, 70, 180]
# http://onlinelibrary.wiley.com/doi/10.1111/j.1528-1167.2011.03138.x/pdf
elif nfreq_bands == 8:
bands = [0.1, 4, 8, 12, 30, 50, 70, 100, 180]
elif nfreq_bands == 12:
bands = [0.5, 4, 8, 12, 30, 40, 50, 60, 70, 85, 100, 140, 180]
elif nfreq_bands == 9:
bands = [0.1, 4, 8, 12, 21, 30, 50, 70, 100, 180]
else:
raise ValueError('wrong number of frequency bands')
freq_bands = np.digitize(fft_freq, bands)
cutoff_index = [0]
for n in range(freq_bands.size):  # range (xrange is Python 2 only)
if(freq_bands[n] != freq_bands[cutoff_index[-1]]):
cutoff_index.append(n)
# the last case is special since it goes to the end
# also we don't need the first bin since we disregard frequencies below the lowest bin
df=np.zeros(nfreq_bands)
for n in range(2, len(cutoff_index) - 1):
df[n-2] = np.mean(fft[cutoff_index[n-1]:cutoff_index[n]])
df[-1] = np.mean(fft[cutoff_index[-2]:cutoff_index[-1]])
# we assume that fft_freq is only increasing
#df = DataFrame({'fft': fft, 'band': freq_bands})
#df = df.groupby('band').mean()
return df
# returns channels x bins x time-frames
def compute_fft(x, data_length_sec, sampling_frequency, nfreq_bands, win_length_sec, stride_sec, features):
n_channels = x.shape[0]
n_timesteps = (data_length_sec - win_length_sec) // stride_sec + 1  # integer count of windows
n_fbins = nfreq_bands + 1 if 'std' in features else nfreq_bands
x2 = np.zeros((n_channels, n_fbins, n_timesteps))
count = 0
for i in range(n_channels):
xc = np.zeros((n_fbins, n_timesteps))
for frame_num, w in enumerate(range(0, data_length_sec - win_length_sec + 1, stride_sec)):
count += 1
#print frame_num, w
xw = x[i, w * sampling_frequency: (w + win_length_sec) * sampling_frequency]
#st = time.clock()
fft = np.log10(np.absolute(np.fft.rfft(xw, axis = -1)))
#end = time.clock()
#if(count % 111 == 0): print("LOGFFT: " + str(end-st))
#st = time.clock()
fft_freq = np.fft.rfftfreq(n=xw.shape[-1], d=1.0 / sampling_frequency)
fft = np.log10(np.absolute(np.fft.rfft(xw, axis = -1)))
#end = time.clock()
#if(count % 111 == 0): print("RFFTFREQ: " + str(end-st))
#if(frame_num == 1): print(fft_freq)
'''
st = time.clock()
if(count % 111 == 0):print(group_into_bands(fft, fft_freq, nfreq_bands))
end = time.clock()
if(count % 111 == 0): print("GroupBands: " + str(end-st))
st = time.clock()
if(count % 111 == 0):print(fgroup_into_bands(fft, fft_freq, nfreq_bands))
end = time.clock()
if(count % 111 == 0): print("FGroupBands: " + str(end-st))
'''
# test if both are equivalent
#print(fgroup_into_bands(fft, fft_freq, nfreq_bands)-group_into_bands(fft, fft_freq, nfreq_bands))
#st = time.clock()
xc[:nfreq_bands, frame_num] = fgroup_into_bands(fft, fft_freq, nfreq_bands)
#end = time.clock()
#if(count % 111 == 0): print("FGroupBands: " + str(end-st))
#st = time.clock()
if 'std' in features:
xc[-1, frame_num] = np.std(xw)
#end = time.clock()
#if(count % 111 == 0): print("STD: " + str(end-st))
#st = time.clock()
x2[i, :, :] = xc
#end = time.clock()
#print("ASSIGN: " + str(end-st))
print(count)
print(np.amax(x2))
print(np.amin(x2))
return x2
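# Added note (illustrative arithmetic, using the values set under __main__ below):
# with data_length_sec=600, win_length_sec=25 and stride_sec=1 there are
# (600 - 25) // 1 + 1 = 576 frames, so x2 has shape (n_channels, nfreq_bands + 1, 576)
# when 'std' is part of features, and (n_channels, nfreq_bands, 576) otherwise.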
# filters out the low freq and high freq
def filter_opt(x, new_sampling_frequency, data_length_sec, lowcut, highcut):
st = time.clock()
x1 = scipy.signal.resample(x, new_sampling_frequency * data_length_sec, axis=1)
end = time.clock()
print("Resample: " + str(end-st))
nyq = 0.5 * new_sampling_frequency
st = time.clock()
b, a = sc.signal.butter(5, np.array([lowcut, highcut]) / nyq, btype='band')
end = time.clock()
print("Butter: " + str(end-st))
st = time.clock()
x_filt = sc.signal.lfilter(b, a, x1, axis=1)
end = time.clock()
print("lFilter: " + str(end-st))
return np.float32(x_filt)
# Computes X and y from all the .npy files in a directory
# X = n x channels x filters x time-frames
# y = n x 1
def compute_X_Y(direc):
n = len([name for name in os.listdir(direc)])
X = np.zeros((n, 16, 7, 10))
y = np.empty((n, 1))
for i, filename in enumerate(os.listdir(direc)):
if filename.endswith('.npy'):
f = np.load(direc + filename)
f = f.T
filtered = filter_opt(f, 400, 600, 0.1, 180.0)  # filter_opt is the band-pass helper defined above (filter_freq does not exist)
new_x = compute_fft(filtered, data_length_sec, sampling_frequency, nfreq_bands, win_length_sec, stride_sec, features)
X[i, ] = new_x
if filename.endswith('1.npy'):
y[i] = 1
elif filename.endswith('0.npy'):
y[i] = 0
continue
else:
continue
return X, y
'''
direc_train = '/Users/Anuar_The_Great/desktop/ML/train_1_npy_train/'
direc_test = '/Users/Anuar_The_Great/desktop/ML/train_1_npy_test/'
X_train, y_train = compute_X_Y(direc_train)
X_test, y_test = compute_X_Y(direc_test)
'''
if __name__ == "__main__":
data_length_sec = 600
sampling_frequency = 400
nfreq_bands = 12 # can play around with these:
win_length_sec = 25
stride_sec = 1
features = "meanlog_std" # will create a new additional bin of standard deviation of other bins
f = np.load('data/train_1_npy/1_1_0.npy')['data'][()]
# compute_fft accepts a matrix of channels x time, so we gotta transpose
x = f.T
# Test one observation
filtered = filter_opt(x, 400, 600, 0.1, 180.0)  # use the band-pass helper rather than the builtin filter()
new_x = compute_fft(x, data_length_sec, sampling_frequency, nfreq_bands, win_length_sec, stride_sec, features)
#pdb.set_trace()
print(new_x.shape)
#print new_x
print(new_x[0])
img2 = plt.imshow(new_x[0][0:-1],interpolation='nearest', cmap = cm.gist_rainbow, origin='lower')
plt.show()
img2 = plt.imshow(new_x[1][0:-1],interpolation='nearest', cmap = cm.gist_rainbow, origin='lower')
plt.show()
|
Anmol6/kaggle-seizure-competition
|
preprocess/pre_processing.py
|
pre_processing.py
|
py
| 7,809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.digitize",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.fft.rfft",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.fft.rfftfreq",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.log10",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.fft.rfft",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "numpy.std",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "scipy.signal.resample",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "time.clock",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "scipy.signal.butter",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lfilter",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "time.clock",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.gist_rainbow",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.gist_rainbow",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.cm",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 216,
"usage_type": "name"
}
] |
34002077692
|
# SWEA 5105 Maze Distance
from collections import deque
def bfs(x, y):
q = deque([(x, y)])
arr[x][y] = 0
dr = [-1, 1, 0, 0]
dc = [0, 0, -1, 1]
while q:
r, c = q.popleft()
for i in range(4):
nr = r + dr[i]
nc = c + dc[i]
if 0 <= nr < N and 0 <= nc < N and arr[nr][nc] == -1:
if maze[nr][nc] == 0:
arr[nr][nc] = arr[r][c] + 1
q.append((nr, nc))
elif maze[nr][nc] == 3:
arr[nr][nc] = arr[r][c]
return
T = int(input())
for tc in range(1, T + 1):
N = int(input())
maze = [list(map(int, list(input()))) for _ in range(N)]
arr = [[-1 for _ in range(N)] for _ in range(N)]
for i in range(N):
for j in range(N):
if maze[i][j] == 2:
bfs(i, j)
for i in range(N):
for j in range(N):
if maze[i][j] == 3:
if arr[i][j] == -1:
print(f"#{tc} 0")
else:
print(f"#{tc} {arr[i][j]}")
|
jinmoonJ/algorithm
|
0825/SWEA_5105.py
|
SWEA_5105.py
|
py
| 1,147 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
24696329244
|
"""
Author : Animesh Bhasin
Version : 1
Version Date : 16th Nov 2022
Description : This script is to read the data from City of New York API and load into postgres db
"""
import requests
import pandas as pd
from datetime import date, timedelta
from sqlalchemy import create_engine
import os
def main():
"""
Main function
"""
crashes_url = 'https://data.cityofnewyork.us/resource/h9gi-nx95.json'
column_list = ['crash_date', 'crash_time', 'borough', 'zip_code', 'location_latitude', 'location_longitude',
'location_location', 'on_street_name', 'off_street_name', 'cross_street_name',
'number_of_persons_injured', 'number_of_persons_killed', 'number_of_pedestrians_injured',
'number_of_pedestrians_killed', 'number_of_cyclist_injured', 'number_of_cyclist_killed',
'number_of_motorist_injured', 'number_of_motorist_killed', 'contributing_factor_vehicle_1',
'contributing_factor_vehicle_2', 'contributing_factor_vehicle_3', 'contributing_factor_vehicle_4',
'contributing_factor_vehicle_5', 'collision_id', 'vehicle_type_code1', 'vehicle_type_code2',
'vehicle_type_code_3', 'vehicle_type_code_4', 'vehicle_type_code_5']
#Connect to database
database_url = os.getenv('database_url')
schema_name = 'crash'
crashes_table_name = 'crashes'
engine = create_engine(database_url, echo=False)
# Find the maximum date already present in the db (so new data can be processed incrementally)
crash_start_date = get_date_max_date_in_db(engine, schema_name, crashes_table_name)
#Find dates to process
date_list = get_list_of_dates_to_process(crash_start_date)
print ('Processing dates : {}'.format(date_list))
#Process data for each date
for crash_date in date_list:
print('Processing date : {}'.format(crash_date))
#Read data from API
data = read_data_for_crash_date(crash_date, crashes_url)
if data:
#convert dictionary data to dataframe
df, collision_id_list_str = get_data_df(data, column_list)
#Write data to database
write_to_db(engine, df, collision_id_list_str, schema_name, crashes_table_name)
def get_date_max_date_in_db(engine, schema_name, table_name):
"""
Function to get max crash date from database
Args:
engine (SQLAlchemyEngine): SQL Alchemy engine created from database
schema_name(str): Schema Name
table_name(str): Table Name
Returns:
(date): Max date from database
"""
sql_statement = '''select max(crash_date + interval '1' day) from {}.{};'''.format(schema_name, table_name)
with engine.connect() as connection:
result = connection.execute(sql_statement)
for row in result:
max_date = row[0]
if max_date:
return max_date
else:
return (date.today() - timedelta(days=5)).strftime('%Y-%m-%d')
def get_list_of_dates_to_process(start_date):
"""
Function to list of dates to process
Args:
start_date(date): Starting date
Returns:
(list): List of date strings from the start date up to two days before today's date
"""
end_date = date.today() - timedelta(days=2)
date_df = pd.date_range(start_date, end_date, freq='d')
return date_df.strftime('%Y-%m-%d').to_list()
def read_data_for_crash_date(crash_date, api_url):
"""
Function to read json data from API using requests library
Args:
crash_date(date): Date for which crashes data is fetched
api_url(string): API endpoint for crashes data
Returns:
data(dict): Return the json response of API containing crashes data for that date
"""
params = {'crash_date': crash_date}
r = requests.get(url=api_url, params=params)
# extracting data in json format
data = r.json()
return data
def get_data_df(data, column_list):
"""
Function to convert json data to dataframe
Args:
data(dict): json response of API containing crashes data
column_list(list): List containing key names to be processed from data dict
Returns:
df(Dataframe) : Dataframe containing collisions data
collision_id_list_str(str) : A String of list of collision ids being processed needed so that
existing collision id records can be deleted from database before inserting them
to avoid duplicates in database
"""
df = pd.json_normalize(data, sep='_')
df = df.drop(columns=[col for col in df if col not in column_list])
collision_id_list = df['collision_id'].tolist()
collision_id_list_str = str(collision_id_list)[1:-1]
return df, collision_id_list_str
def write_to_db(engine, df, collision_id_list_str, schema_name, table_name):
"""
Function to write the dataframe to database
Args:
engine (SQLAlchemyEngine): SQL Alchemy engine created from database
df(Dataframe) : Dataframe containing collisions data
collision_id_list_str(str) : A String of list of collision ids being processed needed so that
existing collision id records can be deleted from database before inserting them
to avoid duplicates in database
schema_name(str): Schema Name
table_name(str): Table Name
"""
#Delete existing collision id records to ensure deduplication
sql_statement = 'delete from {}.{} where collision_id in ({})'.format(schema_name, table_name,
collision_id_list_str)
engine.execute(sql_statement)
df.to_sql(name=table_name, schema=schema_name, con=engine, if_exists='append', index=False)
if __name__ == '__main__':
main()
|
Sapphirine/202112-26-NYC-Vehicle-Crash-Analysis
|
get_crashes.py
|
get_crashes.py
|
py
| 5,844 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.getenv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pandas.date_range",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 122,
"usage_type": "call"
}
] |
21776740540
|
import sys
import os
import click
import json
import re
import pandas as pd
import numpy as np
from PIL import ImageTk, Image
from tkinter import Tk
from .modules.interface import AnnotationInterface
@click.version_option("0.0.1")
@click.command()
@click.argument('image_dir', required=True)
@click.argument('bindings_json', required=True)
@click.option('--csv', required=False, default=None,
help='Location of annotation CSV file')
@click.option('--o', default="output.csv", required=False,
help='File to save annotations to')
@click.option('--start_i', default=0, required=False)
@click.option('--window_w', default=764, required=False)
@click.option('--window_h', default=1200, required=False)
@click.option('--resize_w', default=382, required=False)
@click.option('--resize_h', default=470, required=False)
@click.option('--filetype', default='jpg', required=False)
def main(image_dir, bindings_json, csv,
o, start_i,
window_w, window_h,
resize_w, resize_h,
filetype):
r"""
EXPERIMENTAL
Picture annotation tool.
CSV should contain 'image' column as first column specifying image names
without the directory.
"""
print("csv =", csv)
print("output file=", o)
print("image_dir=", image_dir)
print("bindings_json =", bindings_json)
print("start_i =", start_i)
print("window_w =", window_w)
print("window_h =", window_h)
print("resize_w =", resize_w)
print("resize_h =", resize_h)
print("filetype =", filetype)
with open(bindings_json) as json_file:
json_dict = json.load(json_file)
bindings = json_dict['bindings']
print("bindings =", bindings)
classes = list(bindings.values())
print("classes = ", classes)
filetype_regex = re.compile(f".+\\.{filetype}$")
# Default behaviour: if CSV is not provided, create empty dataframe with the
# categories set to the classes from the keybindings and the number of
    # rows corresponding to the pictures.
# If CSV is provided, use that as the annotation df (make sure the
# keybinding classes are a subset of the annotation classes)
if csv is None:
assert not os.path.isfile(o), "csv not specified but output.csv file exists"
files_in_image_dir = os.listdir(image_dir)
image_names = list(filter(filetype_regex.match, files_in_image_dir))
num_pictures = len(image_names)
annotation_classes = classes
annotation_values = np.zeros((num_pictures, len(annotation_classes)))
annotations = pd.concat((pd.DataFrame(image_names), pd.DataFrame(annotation_values)), axis=1)
annotations.columns = ['image'] + annotation_classes
else:
annotations = pd.read_csv(csv)
annotation_classes = [x for i,x in
enumerate(annotations.columns) if i>0]
root = Tk()
ai = AnnotationInterface(master=root,
bindings=bindings,
classes=classes,
image_dir=image_dir,
data_df=annotations,
save_csv=o,
window_w = window_w, window_h = window_h,
start_i = start_i)
# Key bindings
for curr_key, curr_class in zip(bindings.keys(), classes):
print(f"bound {curr_key} to {curr_class}")
root.bind(curr_key, ai.pressEvent)
root.bind("<Return>", ai.prev_picture)
root.bind("<space>", ai.next_picture)
root.mainloop()
if __name__ == '__main__':
args = sys.argv
if "--help" in args or len(args) == 1:
print("Picture annotation tool")
main()
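# Example invocation sketch (paths and file names below are hypothetical):
#   python -m annotpics ./images bindings.json --o annotations.csv --filetype png
# where bindings.json maps keys to class names, e.g. {"bindings": {"a": "cat", "b": "dog"}}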
|
jacobhepkema/annotpics
|
annotpics/__main__.py
|
__main__.py
|
py
| 3,726 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "modules.interface.AnnotationInterface",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "click.version_option",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "click.command",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 100,
"usage_type": "attribute"
}
] |
21246199914
|
import os
import pytest
import pandas as pd
from typing import List
from faker import Faker
from dataclasses import dataclass
from prepare_dataset import read_data, train_target
@dataclass()
class TrainingPipelineParams:
input_data_path: str
@pytest.fixture()
def dataset_path(tmpdir):
fake = Faker()
fake.set_arguments('age', {'min_value': 29, 'max_value': 100})
fake.set_arguments('sex', {'min_value': 0, 'max_value': 1})
fake.set_arguments('resteg', {'min_value': 0, 'max_value': 3})
fake.set_arguments('target', {'min_value': 0, 'max_value': 1})
sample_csv = fake.csv(header=('age', 'sex', 'resteg', 'target'),
data_columns=('{{pyint: age}}',
'{{pyint: sex}}',
'{{pyint: resteg}}',
'{{pyint: target}}'),
num_rows=10)
dataset_fio = tmpdir.join('sample.csv')
dataset_fio.write(sample_csv)
return dataset_fio
@pytest.fixture()
def train_pipeline_params(dataset_path):
params = TrainingPipelineParams
params.input_data_path = dataset_path
return params
@pytest.fixture()
def target_col():
return 'target'
def test_load_dataset_using_path(train_pipeline_params, target_col):
df = read_data(train_pipeline_params)
assert isinstance(df, pd.DataFrame)
assert 10 == len(df)
assert target_col in df.keys()
def test_train_target(tmpdir, train_pipeline_params):
df = read_data(train_pipeline_params)
columns = ['age', 'sex', 'resteg']
train, target = train_target(df, columns, 'target')
assert (10, 3) == train.shape
assert pd.Series == type(target)
assert (10,) == target.shape
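# For reference, a minimal sketch of what prepare_dataset.read_data and
# train_target are assumed to do, inferred only from the assertions above
# (not the actual module under test):
#   def read_data(params):
#       return pd.read_csv(params.input_data_path)
#   def train_target(df, feature_cols, target_col):
#       return df[feature_cols], df[target_col]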
|
made-ml-in-prod-2021/alexshevchuk7
|
ml_project/tests/test_prepare_data.py
|
test_prepare_data.py
|
py
| 1,815 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dataclasses.dataclass",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "faker.Faker",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "prepare_dataset.read_data",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "prepare_dataset.read_data",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "prepare_dataset.train_target",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 61,
"usage_type": "attribute"
}
] |
3361094578
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from functions import noso
from functions import dtdu_backward
from functions import dodt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float
class fcn(nn.Module):
def __init__(self, task, thresh, tau_m, tau_s, num_steps, frate):
super(fcn, self).__init__()
self.task = task
self.thresh = thresh
self.tau_m = tau_m
self.tau_s = tau_s
self.num_steps = num_steps
self.frate = frate
if self.task == 'mnist':
self.cfg_fc = [784, 400, 10]
elif self.task == 'nmnist':
self.cfg_fc = [34*34*2, 800, 10]
self.num_steps = 300
self.fc1 = nn.Linear(self.cfg_fc[0], self.cfg_fc[1], bias=False).float()
self.fc2 = nn.Linear(self.cfg_fc[1], self.cfg_fc[2], bias=False).float()
nn.init.normal_(self.fc1.weight, mean=0, std=np.sqrt(2/self.cfg_fc[0]))
nn.init.normal_(self.fc2.weight, mean=0, std=np.sqrt(2/self.cfg_fc[1]))
def forward(self, input, batch_size):
h1_vm = h1_vs = torch.zeros(batch_size, self.cfg_fc[0], dtype=dtype, device=device)
h1_um = h1_us = h1_spike = torch.zeros(batch_size, self.cfg_fc[1], dtype=dtype, device=device)
h1_sav = torch.ones(batch_size, self.cfg_fc[1], dtype=dtype, device=device)
h2_vm = h2_vs = torch.zeros(batch_size, self.cfg_fc[1], dtype=dtype, device=device)
h2_um = h2_us = h2_spike = h2_u = torch.zeros(batch_size, self.cfg_fc[2], dtype=dtype, device=device)
h2_sav = torch.ones(batch_size, self.cfg_fc[2], dtype=dtype, device=device)
# output
out_t = self.num_steps * torch.ones(batch_size, self.cfg_fc[2], dtype=dtype, device=device)
out_u = torch.zeros(batch_size, self.cfg_fc[2], dtype=dtype, device=device)
sum_sp = torch.zeros(len(self.cfg_fc))
# for backprop
h1_v = torch.zeros(batch_size, self.cfg_fc[1], self.cfg_fc[0], dtype=dtype, device=device)
h2_v = torch.zeros(batch_size, self.cfg_fc[2], self.cfg_fc[1], dtype=dtype, device=device)
h1_dvdt = torch.zeros(batch_size, self.cfg_fc[1], self.cfg_fc[0], dtype=dtype, device=device)
h2_dvdt = torch.zeros(batch_size, self.cfg_fc[2], self.cfg_fc[1], dtype=dtype, device=device)
h1_dtdu = torch.zeros(batch_size, self.cfg_fc[1], dtype=dtype, device=device)
h2_dtdu = torch.zeros(batch_size, self.cfg_fc[2], dtype=dtype, device=device)
for step in range(self.num_steps):
# MNIST input encoding : Poisson spike generation
if self.task == 'mnist':
in_spike = (input * self.frate > torch.rand(input.size(), device=device)).view(batch_size, -1).float()
# N-MNIST input encoding
elif self.task == 'nmnist':
in_spike = input[:,:,:,:, step].view(batch_size, -1)
# Calculation of first hidden layer
h1_sav, h1_vm, h1_vs, h1_um, h1_us, h1_spike = noso(self.thresh,
self.tau_m,
self.tau_s,
self.fc1,
in_spike,
h1_sav,
h1_vm,
h1_vs,
h1_spike,
outneuron=False)
# Calculation of output layer
h2_sav, h2_vm, h2_vs, h2_um, h2_us, h2_u, h2_spike = noso(self.thresh,
self.tau_m,
self.tau_s,
self.fc2,
h1_spike,
h2_sav,
h2_vm,
h2_vs,
h2_spike,
outneuron=True)
# Recoding of output spike timings and output membrane potential
out_t += dodt.apply(h2_spike, step, self.num_steps)
out_u += h2_spike * h2_u
sum_sp[0] += in_spike.sum().item()
sum_sp[1] += h1_spike.sum().item()
sum_sp[2] += h2_spike.sum().item()
# for backprop
h1_v += torch.unsqueeze((h1_vm - h1_vs), 1) * torch.unsqueeze(h1_spike, 2)
h2_v += torch.unsqueeze((h2_vm - h2_vs), 1) * torch.unsqueeze(h2_spike, 2)
h1_dvdt += torch.unsqueeze((h1_vm / self.tau_m - h1_vs / self.tau_s), 1) * torch.unsqueeze(h1_spike, 2)
h2_dvdt += torch.unsqueeze((h2_vm / self.tau_m - h2_vs / self.tau_s), 1) * torch.unsqueeze(h2_spike, 2)
h1_dtdu += dtdu_backward(h1_um, h1_us, h1_spike, self.thresh, self.tau_m, self.tau_s)
h2_dtdu += dtdu_backward(h2_um, h2_us, h2_spike, self.thresh, self.tau_m, self.tau_s)
if out_t.max() < self.num_steps:
return out_t, out_u, sum_sp, [h1_v, h2_v], [h1_dvdt, h2_dvdt], [h1_dtdu, h2_dtdu]
return out_t, out_u, sum_sp, [h1_v, h2_v], [h1_dvdt, h2_dvdt], [h1_dtdu, h2_dtdu]
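# Minimal usage sketch (hyperparameter values are illustrative, not taken from
# the original training script):
#   net = fcn(task='mnist', thresh=1.0, tau_m=20.0, tau_s=5.0, num_steps=100, frate=0.5).to(device)
#   images = torch.rand(32, 1, 28, 28, device=device)   # fake MNIST batch
#   out_t, out_u, sum_sp, _, _, _ = net(images, batch_size=32)
#   pred = out_t.argmin(dim=1)   # earliest-spiking output neuron, one common time-to-first-spike readout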
|
dooseokjeong/BPTC-NOSO
|
SNN_folded/network.py
|
network.py
|
py
| 5,968 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.float",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "functions.noso",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "functions.noso",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "functions.dodt.apply",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "functions.dodt",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.unsqueeze",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "functions.dtdu_backward",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "functions.dtdu_backward",
"line_number": 107,
"usage_type": "call"
}
] |
30451465881
|
from calendar import c
from re import S
import sys
import time
import smbus
import spidev
import RPi.GPIO as GPIO
import logging
from ctypes import *
logger = logging.getLogger()
# logger.setLevel(logging.INFO) # Display all print information
logger.setLevel(logging.FATAL) # If you don’t want to display too many prints, only print errors, please use this option
ph = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - [%(filename)s %(funcName)s]:%(lineno)d - %(levelname)s: %(message)s")
ph.setFormatter(formatter)
logger.addHandler(ph)
## I2C communication address when SDO is grounded
DFROBOT_BME280_I2C_ADDR_SDO_GND = 0x76
## I2C communication address when SDO is connected to power
DFROBOT_BME280_I2C_ADDR_SDO_VDD = 0x77
## BME280 chip version
BME280_REG_CHIP_ID_DEFAULT = 0x60
# BME280 register address
## 0x88-0xA1 is calibration data, calib00..calib25
BME280_CALIB_DATA_00_25 = 0x88
## The “CHIP_ID” register contains the chip identification code.
BME280_CHIP_ID = 0xD0
## Triggers a reset, all user configuration settings are overwritten with their default state.
BME280_CMD_RESET = 0xE0
## 0xE1-0xF0 is calibration data, calib26..calib41
BME280_CALIB_DATA_26_41 = 0xE1
## The “CTRL_HUM” register
BME280_CTRL_HUM = 0xF2
## The Sensor Status Flags are stored in the “STATUS” register.
BME280_STATUS = 0xF3
## The “CTRL_MEAS” register
BME280_CTRL_MEAS = 0xF4
## The “CONFIG” register
BME280_CONFIG = 0xF5
## The 24Bit pressure data is split and stored in three consecutive registers.
BME280_PRESS_DATA_MSB = 0xF7
## The 24Bit temperature data is split and stored in three consecutive registered.
BME280_TEMP_DATA_MSB = 0xFA
## The 16Bit temperature data is split and stored in two consecutive registered.
BME280_HUM_DATA_MSB = 0xFD
# Sensor configuration
## Sleep mode: It will be in sleep mode by default after power-on reset. In this mode, no measurement is performed and power consumption is minimal.
## All registers are accessible for reading the chip ID and compensation coefficient.
SLEEP_MODE = 0x00
## Forced mode: In this mode, the sensor will take a single measurement according to the selected measurement and filtering options. After the
## measurement is completed, the sensor will return to sleep mode, and the measurement result can be obtained in the register.
FORCED_MODE = 0x01
## Normal mode: Continuously loop between the measurement period and the standby period. The output data rates are related to the ODR mode setting.
NORMAL_MODE = 0x03
## temperature oversampling settings
BME280_TEMP_OSR_SETTINGS = (0x00, 0x20, 0x40, 0x60, 0x80, 0xA0)
## pressure oversampling settings
BME280_PRESS_OSR_SETTINGS = (0x00, 0x04, 0x08, 0x0C, 0x10, 0x14)
## humidity oversampling settings
BME280_HUMI_OSR_SETTINGS = (0x00, 0x01, 0x02, 0x03, 0x04, 0x05)
## IIR filter settings
BME280_IIR_FILTER_SETTINGS = (0x00, 0x04, 0x08, 0x0C, 0x10)
# Controls inactive duration tstandby in normal mode.
## Controls inactive duration tstandby; ODR 2000Hz; Sampling period:0.5 ms
BME280_CONFIG_STANDBY_TIME_0P5 = 0x00
## Sampling period:62.5 ms
BME280_CONFIG_STANDBY_TIME_62P5 = 0x20
## Sampling period:125 ms
BME280_CONFIG_STANDBY_TIME_125 = 0x40
## Sampling period:250 ms
BME280_CONFIG_STANDBY_TIME_250 = 0x60
## Sampling period:500 ms
BME280_CONFIG_STANDBY_TIME_500 = 0x80
## Sampling period:1000 ms
BME280_CONFIG_STANDBY_TIME_1000 = 0xA0
## Sampling period:10 ms
BME280_CONFIG_STANDBY_TIME_10 = 0xC0
## Sampling period:20 ms
BME280_CONFIG_STANDBY_TIME_20 = 0xE0
## Triggers a reset, all user configuration settings are overwritten with their default state.
BME280_CMD_RESET_VALUE = 0xB6
## Standard sea level pressure, unit: pa
STANDARD_SEA_LEVEL_PRESSURE_PA = 101325.00
class DFRobot_BME280(object):
'''!
@brief define DFRobot_BME280 base class
@details for driving the pressure sensor
'''
def __init__(self):
'''!
@brief Module init
'''
# Sea level pressure in Pa.
self.sea_level_pressure = STANDARD_SEA_LEVEL_PRESSURE_PA
self._t_fine = 0
def begin(self):
'''!
@brief Initialize sensor
@return return initialization status
@retval True indicate initialization succeed
@retval False indicate initialization failed
'''
ret = False
chip_id = self._read_reg(BME280_CHIP_ID, 1)
logger.info(chip_id[0])
if chip_id[0] == BME280_REG_CHIP_ID_DEFAULT:
self.reset()
time.sleep(0.3)
self._get_coefficients()
self.set_config_filter(BME280_IIR_FILTER_SETTINGS[0])
self.set_config_T_standby(BME280_CONFIG_STANDBY_TIME_125)
self.set_ctrl_meas_sampling_temp(BME280_TEMP_OSR_SETTINGS[3])
self.set_ctrl_meas_sampling_press(BME280_PRESS_OSR_SETTINGS[3])
self.set_ctrl_sampling_humi(BME280_HUMI_OSR_SETTINGS[3])
self.set_ctrl_meas_mode(NORMAL_MODE)
time.sleep(2) # warm-up
ret = True
return ret
@property
def get_temperature(self):
'''!
@brief Get pressure measurement value from register, working range (-40 ~ +85 °C)
@return Return temperature measurements, unit: °C
'''
data = self._read_reg(BME280_TEMP_DATA_MSB, 3)
raw = data[0] << 12 | data[1] << 4 | (data[2] & 0x0F)
# datasheet, Trimming Coefficient listing in register map with size and sign attributes
t1, t2, t3, p1, p2, p3, p4, p5, p6, p7, p8, p9, h1, h2, h3, h4, h5, h6 = self._data_calib
v1 = ((((raw >> 3) - (t1 << 1))) * t2) >> 11
v2 = (((((raw >> 4) - t1) * ((raw >> 4) - t1)) >> 12) * t3) >> 14
self._t_fine = v1 + v2
rslt = (self._t_fine * 5 + 128) >> 8
return (float(rslt) / 100) # round((rslt / 100), 2)
@property
def get_pressure(self):
'''!
@brief Get pressure measurement value from register, working range (300 ~ 1100 hPa)
@return Return pressure measurements, unit: Pa
@attention If the reference value is provided before, the absolute value of the current
@n position pressure is calculated according to the calibrated sea level atmospheric pressure
'''
data = self._read_reg(BME280_PRESS_DATA_MSB, 3)
raw = data[0] << 12 | data[1] << 4 | (data[2] & 0x0F)
# datasheet, Trimming Coefficient listing in register map with size and sign attributes
t1, t2, t3, p1, p2, p3, p4, p5, p6, p7, p8, p9, h1, h2, h3, h4, h5, h6 = self._data_calib
self.get_temperature # update _t_fine
v1 = self._t_fine - 128000
v2 = v1 * v1 * p6
v2 = v2 + ((v1 * p5) << 17)
v2 = v2 + (p4 << 35)
v1 = ((v1 * v1 * p3) >> 8) + ((v1 * p2) << 12)
v1 = (((1 << 47) + v1)) * p1 >> 33
if(v1 == 0):
return 0
rslt = 1048576 - raw
rslt = (((rslt << 31) - v2) * 3125) / v1
v1 = (p9 * (int(rslt) >> 13) * (int(rslt) >> 13)) >> 25
v2 = (p8 * int(rslt)) >> 19
rslt = ((int(rslt) + v1 + v2) >> 8) + (p7 << 4)
return (float(rslt) / 256)
@property
def get_humidity(self):
'''!
@brief Get humidity measurement value from register, working range (0 ~ 100 %RH)
@return Return humidity measurements, unit: %RH
'''
data = self._read_reg(BME280_HUM_DATA_MSB, 2)
raw = data[0] << 8 | data[1]
# datasheet, Trimming Coefficient listing in register map with size and sign attributes
t1, t2, t3, p1, p2, p3, p4, p5, p6, p7, p8, p9, h1, h2, h3, h4, h5, h6 = self._data_calib
self.get_temperature # update _t_fine
v1 = self._t_fine - 76800
v1 = (((((raw <<14) - (h4 << 20) - (h5 * v1)) + 16384) >> 15) * (((((((v1 * h6) >> 10) *
(((v1 * h3) >> 11) + 32768)) >> 10) + 2097152) * h2 + 8192) >> 14))
v1 = (v1 - (((((v1 >> 15) * (v1 >> 15)) >> 7) * h1) >> 4))
if v1 < 0:
v1 = 0
elif v1 > 419430400:
v1 = 419430400
return (v1 >> 12) / 1024.0
@property
def get_altitude(self):
'''!
@brief Calculate the altitude based on the atmospheric pressure measured by the sensor
@return Return altitude, unit: m
@attention If the reference value is provided before, the absolute value of the current
@n position pressure is calculated according to the calibrated sea level atmospheric pressure
'''
# see https://www.weather.gov/media/epz/wxcalc/pressureAltitude.pdf
return 44307.7 * (1 - (self.get_pressure / self.sea_level_pressure) ** 0.190284)
@property
def get_data_ready_status(self):
'''!
@brief get data ready status
@return True is data ready
'''
temp = self._read_reg(BME280_STATUS, 1)[0]
if temp & 0b00001000: # measuring[3]
return False
else:
return True
def reset(self):
'''!
@brief Reset and restart the sensor, restoring the sensor configuration to the default configuration
'''
self._write_reg(BME280_CMD_RESET, BME280_CMD_RESET_VALUE)
time.sleep(0.1)
def calibrated_absolute_difference(self, altitude):
'''!
@brief Take the given current location altitude as the reference value
@n to eliminate the absolute difference for subsequent pressure and altitude data
@param altitude Altitude in current position
@return Pass the benchmark value successfully will return ture, if failed it will return false
'''
# The altitude in meters based on the currently set sea level pressure.
ret = False
if STANDARD_SEA_LEVEL_PRESSURE_PA == self.sea_level_pressure:
self.sea_level_pressure = (self.get_pressure / pow(1.0 - (altitude / 44307.7), 5.255302))
ret = True
return ret
def set_ctrl_meas_mode(self, mode):
'''!
@brief Configure measurement mode and power mode
@param mode The measurement mode and power mode that need to be set:
@n SLEEP_MODE(Sleep mode): It will be in sleep mode by default after power-on reset. In this mode,no
@n measurement is performed and power consumption is minimal. All registers
@n are accessible for reading the chip ID and compensation coefficient.
@n FORCED_MODE(Forced mode): In this mode, the sensor will take a single measurement according to the selected
@n measurement and filtering options. After the measurement is completed, the sensor
@n will return to sleep mode, and the measurement result can be obtained in the register.
@n NORMAL_MODE(Normal mode): Continuously loop between the measurement period and the standby period.
@n The output data rates are related to the ODR mode setting.
'''
mask = 0b00000011 # mode[1:0]
self._write_reg_bits(BME280_CTRL_MEAS, mask, mode)
def set_ctrl_meas_sampling_temp(self, osrs_t):
'''!
@brief Configure the oversampling when measuring temperature (OSR:over-sampling register)
@details When the IIR filter is enabled, the temperature resolution is 20 bit.
@n When the IIR filter is disabled, the temperature resolution is 16 + (osrs_t – 1) bit,
@n e.g. 18 bit when osrs_t is set to ‘3’.
@param - osrs_t 6 temp oversampling mode:
@n BME280_TEMP_OSR_SETTINGS[0], temperature sampling×0, Skipped (output set to 0x80000)
@n BME280_TEMP_OSR_SETTINGS[1], temperature sampling×2, 16 bit
@n BME280_TEMP_OSR_SETTINGS[2], temperature sampling×4, 17 bit
@n BME280_TEMP_OSR_SETTINGS[3], temperature sampling×8, 18 bit
@n BME280_TEMP_OSR_SETTINGS[4], temperature sampling×16, 19 bit
@n BME280_TEMP_OSR_SETTINGS[5], temperature sampling×32, 20 bit
'''
mask = 0b11100000 # osrs_t[7:5]
self._write_reg_bits(BME280_CTRL_MEAS, mask, osrs_t)
def set_ctrl_meas_sampling_press(self, osrs_p):
'''!
@brief Configure the oversampling when measuring press (OSR:over-sampling register)
@details When the IIR filter is enabled, the pressure resolution is 20 bit.
@n When the IIR filter is disabled, the pressure resolution is 16 + (osrs_p – 1) bit,
@n e.g. 18 bit when osrs_p is set to ‘3’.
@param - osrs_t 6 temp oversampling mode:
@n BME280_PRESS_OSR_SETTINGS[0], pressure sampling×0, Skipped (output set to 0x80000)
@n BME280_PRESS_OSR_SETTINGS[1], pressure sampling×2, 16 bit
@n BME280_PRESS_OSR_SETTINGS[2], pressure sampling×4, 17 bit
@n BME280_PRESS_OSR_SETTINGS[3], pressure sampling×8, 18 bit
@n BME280_PRESS_OSR_SETTINGS[4], pressure sampling×16, 19 bit
@n BME280_PRESS_OSR_SETTINGS[5], pressure sampling×32, 20 bit
'''
        mask = 0b00011100 # osrs_p[4:2]
self._write_reg_bits(BME280_CTRL_MEAS, mask, osrs_p)
def set_ctrl_sampling_humi(self, osrs_h):
'''!
@brief Configure the oversampling when measuring humidity (OSR:over-sampling register)
@details For the humidity measurement, oversampling is possible to reduce the noise.
@n The resolution of the humidity measurement is fixed at 16 bit ADC output.
@param - osrs_t 6 temp oversampling mode:
@n BME280_HUMI_OSR_SETTINGS[0], humidity sampling×0, Skipped (output set to 0x80000)
@n BME280_HUMI_OSR_SETTINGS[1], humidity sampling×2, 16 bit
@n BME280_HUMI_OSR_SETTINGS[2], humidity sampling×4, 17 bit
@n BME280_HUMI_OSR_SETTINGS[3], humidity sampling×8, 18 bit
@n BME280_HUMI_OSR_SETTINGS[4], humidity sampling×16, 19 bit
@n BME280_HUMI_OSR_SETTINGS[5], humidity sampling×32, 20 bit
'''
        mask = 0b00000111 # osrs_h[2:0]
self._write_reg_bits(BME280_CTRL_HUM, mask, osrs_h)
def set_config_filter(self, iir_config_coef):
'''!
@brief IIR filter coefficient setting(IIR filtering)
@param - iir_config_coef Set IIR filter coefficient, configurable mode:
@n BME280_IIR_FILTER_SETTINGS[0], filter off
@n BME280_IIR_FILTER_SETTINGS[1], filter coefficient 2
@n BME280_IIR_FILTER_SETTINGS[2], filter coefficient 4
@n BME280_IIR_FILTER_SETTINGS[3], filter coefficient 8
@n BME280_IIR_FILTER_SETTINGS[4], filter coefficient 16
'''
        mask = 0b00011100 # filter[4:2]
self._write_reg_bits(BME280_CONFIG, mask, iir_config_coef)
def set_config_T_standby(self, odr_set):
'''!
@brief Set output data rate in subdivision/sub-sampling mode (ODR:output data rates)
@param odr_set The output data rate needs to be set, configurable mode:
@n BME280_CONFIG_STANDBY_TIME_0P5, BME280_CONFIG_STANDBY_TIME_62P5, BME280_CONFIG_STANDBY_TIME_125,
@n BME280_CONFIG_STANDBY_TIME_250, BME280_CONFIG_STANDBY_TIME_500, BME280_CONFIG_STANDBY_TIME_1000,
@n BME280_CONFIG_STANDBY_TIME_10, BME280_CONFIG_STANDBY_TIME_20
@return return configuration results
@retval True indicate configuration succeed
@retval False indicate configuration failed and remains its original state
'''
# The IIR filter coefficient.
ret = True
        mask = 0b11100000 # t_sb[7:5]
self._write_reg_bits(BME280_CONFIG, mask, odr_set)
if (self._read_reg(BME280_CONFIG, 1)[0] & 0xE0):
logger.warning("Sensor configuration error detected!")
ret = False
return ret
def _uint8_to_int(self,num):
'''!
@brief Convert the incoming uint8 type data to int type
@param num Incoming uint8 type data
@return data converted to int type
'''
if(num>127):
num = num - 256
return num
def _uint16_to_int(self,num):
'''!
@brief Convert the incoming uint16 type data to int type
@param num Incoming uint16 type data
@return data converted to int type
'''
if(num>32767):
num = num - 65536
return num
def _get_coefficients(self):
'''!
@brief Get the calibration data in the NVM register of the sensor
'''
calib = self._read_reg(BME280_CALIB_DATA_00_25, 26)
calib1 = self._read_reg(BME280_CALIB_DATA_26_41, 7)
self._data_calib = (
(calib[1] << 8) | calib[0], # T1
self._uint16_to_int((calib[3] << 8) | calib[2]), # T2
self._uint16_to_int((calib[5] << 8) | (calib[4])), # T3
(calib[7] << 8) | calib[6], # P1
self._uint16_to_int((calib[9] << 8) | calib[8]), # P2
self._uint16_to_int((calib[11] << 8) | calib[10]), # P3
self._uint16_to_int((calib[13] << 8) | calib[12]), # P4
self._uint16_to_int((calib[15] << 8) | calib[14]), # P5
self._uint16_to_int((calib[17] << 8) | calib[16]), # P6
self._uint16_to_int((calib[19] << 8) | calib[18]), # P7
self._uint16_to_int((calib[21] << 8) | calib[20]), # P8
self._uint16_to_int((calib[23] << 8) | calib[22]), # P9
calib[25], # H1
self._uint16_to_int((calib1[1] << 8) | calib1[0]), # H2
calib1[2], # H3
self._uint16_to_int((calib1[3] << 4) | (calib1[4] & 0x0F)), # H4
self._uint16_to_int((calib1[5] << 4) | ((calib1[4] >> 4) & 0x0F)), # H5
self._uint8_to_int(calib1[6]), # H6
)
def _write_reg_bits(self, reg, field, val):
'''!
@brief writes data to a register
@param reg register address
@param data written data
'''
temp = self._read_reg(reg, 1)[0]
temp &= ~field
temp |= val
self._write_reg(reg, temp)
def _write_reg(self, reg, data):
'''!
@brief writes data to a register
@param reg register address
@param data written data
'''
# Low level register writing, not implemented in base class
raise NotImplementedError()
def _read_reg(self, reg, length):
'''!
@brief read the data from the register
@param reg register address
@param length read data length
@return read data list
'''
# Low level register writing, not implemented in base class
raise NotImplementedError()
class DFRobot_BME280_I2C(DFRobot_BME280):
'''!
@brief define DFRobot_BME280_I2C base class
@details for using I2C protocol to drive the pressure sensor
'''
def __init__(self, i2c_addr=0x77, bus=1):
'''!
@brief Module I2C communication init
@param i2c_addr I2C communication address
@param bus I2C bus
'''
self._addr = i2c_addr
self._i2c = smbus.SMBus(bus)
super(DFRobot_BME280_I2C, self).__init__()
def _write_reg(self, reg, data):
'''!
@brief writes data to a register
@param reg register address
@param data written data
'''
if isinstance(data, int):
data = [data]
#logger.info(data)
self._i2c.write_i2c_block_data(self._addr, reg, data)
def _read_reg(self, reg, length):
'''!
@brief read the data from the register
@param reg register address
@param length read data length
@return read data list
'''
return self._i2c.read_i2c_block_data(self._addr, reg, length)
class DFRobot_BME280_SPI(DFRobot_BME280):
'''!
@brief define DFRobot_BME280_SPI base class
@details for using SPI protocol to drive the pressure sensor
'''
def __init__(self, cs=8, bus=0, dev=0, speed=500000):
'''!
@brief Module SPI communication init
@param cs cs chip select pin
@param bus SPI bus
@param dev SPI device number
@param speed SPI communication frequency
'''
self._cs = cs
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._cs, GPIO.OUT, initial=1)
GPIO.output(self._cs, GPIO.LOW)
self._spi = spidev.SpiDev()
self._spi.open(bus, dev)
self._spi.no_cs = True
self._spi.max_speed_hz = speed
super(DFRobot_BME280_SPI, self).__init__()
def _write_reg(self, reg, data):
'''!
@brief writes data to a register
@param reg register address
@param data written data
'''
if isinstance(data, int):
data = [data]
#logger.info(data)
reg_addr = [reg & 0x7f]
GPIO.output(self._cs, GPIO.LOW)
self._spi.xfer(reg_addr)
self._spi.xfer(data)
GPIO.output(self._cs, GPIO.HIGH)
def _read_reg(self, reg, length):
'''!
@brief read the data from the register
@param reg register address
@param length read data length
@return read data list
'''
reg_addr = [reg | 0x80]
GPIO.output(self._cs, GPIO.LOW)
#logger.info(reg_addr)
self._spi.xfer(reg_addr)
time.sleep(0.01)
# self._spi.readbytes(1)
rslt = self._spi.readbytes(length)
GPIO.output(self._cs, GPIO.HIGH)
return rslt
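# Example usage sketch (the I2C address and bus number are board-dependent assumptions):
#   sensor = DFRobot_BME280_I2C(i2c_addr=0x77, bus=1)
#   if sensor.begin():
#       print(sensor.get_temperature, "C")   # note: these are properties, not methods
#       print(sensor.get_pressure, "Pa")
#       print(sensor.get_humidity, "%RH")
#       print(sensor.get_altitude, "m")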
|
DFRobot/DFRobot_BME280
|
python/raspberrypi/DFRobot_BME280.py
|
DFRobot_BME280.py
|
py
| 22,063 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.FATAL",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "smbus.SMBus",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.setmode",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BCM",
"line_number": 481,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setwarnings",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 483,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.LOW",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "spidev.SpiDev",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 501,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.LOW",
"line_number": 501,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 504,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 504,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.HIGH",
"line_number": 504,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.LOW",
"line_number": 514,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 520,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.HIGH",
"line_number": 520,
"usage_type": "attribute"
}
] |
9202548056
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
import h5py
import random
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset
from torchvision import models, transforms
from superpixel.slic import SLIC
from lib.make_index import make_data_index, default_loader
from lib.image_process import RandomCropPatches, NonOverlappingCropPatches
from lib.utils import mos_rescale
class IQADataset(Dataset):
"""
IQA Dataset
"""
def __init__(self, args, status='train', loader=default_loader):
"""
:param args: arguments of the model
:param status: train/val/test
:param loader: image loader
"""
self.args = args
self.status = status
self.loader = loader
self.train_database = args.train_database
self.test_database = args.test_database
self.image_n_nodes = args.image_n_nodes
self.patch_n_nodes = args.patch_n_nodes
self.region_size = args.region_size
self.ruler = args.ruler
self.iterate = args.iterate
self.patch_size = args.patch_size
self.n_patches_train = args.n_patches_train
# Train
train_Info = h5py.File(args.train_info, 'r')
train_index = train_Info['index']
train_index = train_index[:, 0 % train_index.shape[1]]
train_ref_ids = train_Info['ref_ids'][0, :]
# Test
test_Info = h5py.File(args.test_info, 'r')
test_index = test_Info['index']
test_index = test_index[:, 0 % test_index.shape[1]]
test_ref_ids = test_Info['ref_ids'][0, :]
# Get dataset index
train_index_, test_index_ = [], []
if 'train' in status:
print('The Training Set Index is ', train_index, ' and num of Training index is ', len(train_index))
for i in range(len(train_ref_ids)):
train_index_.append(i)
self.index = train_index_
print("Number of Training Images: {}\n".format(len(self.index)))
self.mos = train_Info['subjective_scores'][0, self.index]
# self.mos_std = train_Info['subjective_scoresSTD'][0, self.index]
im_names = [train_Info[train_Info['im_names'][0, :][i]][()].tobytes()[::2].decode() for i in self.index]
self.label = []
self.im_names = []
self.dis_type = []
for idx in range(len(self.index)):
self.im_names.append(os.path.join(args.train_im_dir, im_names[idx]))
self.label.append(self.mos[idx])
if self.train_database == 'TID2008' or self.train_database == 'TID2013':
self.dis_type.append(int(im_names[idx][4:6]) - 1)
elif self.train_database == 'KADID':
self.dis_type.append(int(im_names[idx][4:6]) - 1)
elif self.train_database == 'CSIQ':
# Distortion Type
if 'AWGN' in im_names[idx]:
self.dis_type.append(0)
elif 'BLUR' in im_names[idx]:
self.dis_type.append(1)
elif 'contrast' in im_names[idx]:
self.dis_type.append(2)
elif 'fnoise' in im_names[idx]:
self.dis_type.append(3)
elif 'JPEG' in im_names[idx]:
self.dis_type.append(4)
elif 'jpeg2000' in im_names[idx]:
self.dis_type.append(5)
elif self.train_database == 'LIVE':
# Distortion Type
if 'jp2k' in im_names[idx]:
self.dis_type.append(0)
elif 'jpeg' in im_names[idx]:
self.dis_type.append(1)
elif 'wn' in im_names[idx]:
self.dis_type.append(2)
elif 'gblur' in im_names[idx]:
self.dis_type.append(3)
elif 'fastfading' in im_names[idx]:
self.dis_type.append(4)
elif self.train_database == 'SIQAD':
loc = im_names[idx].find('_')
self.dis_type.append(int(im_names[idx][loc + 1]) - 1)
elif self.train_database == 'SCID':
self.dis_type.append(int(im_names[idx][6]) - 1)
else:
print('The Testing Set Index is ', test_index, ' and num of test index is ', len(test_index))
for i in range(len(test_ref_ids)):
test_index_.append(i)
self.index = test_index_
print("Number of Testing Images: {}".format(len(self.index)), '\n')
self.mos = test_Info['subjective_scores'][0, self.index]
im_names = [test_Info[test_Info['im_names'][0, :][i]][()].tobytes()[::2].decode() for i in self.index]
self.label = []
self.im_names = []
self.dis_type = []
for idx in range(len(self.index)):
self.im_names.append(os.path.join(args.test_im_dir, im_names[idx]))
self.label.append(self.mos[idx])
self.dis_type.append(0)
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
im = self.loader(self.im_names[idx])
# Get CNN input
if self.status == 'train':
cnn_input, patch, patch_graph = RandomCropPatches(im, args=self.args, transforms=True)
else:
cnn_input, patch, patch_graph = NonOverlappingCropPatches(im, args=self.args, transforms=True)
# Get labels
label = torch.as_tensor([self.label[idx], ])
# Choose whether to use distortion type or distortion level
dis_type = torch.as_tensor([self.dis_type[idx], ])
return patch, patch_graph, cnn_input, label, dis_type
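# Minimal sketch of wrapping the dataset in a DataLoader (the args namespace is
# hypothetical and must provide the fields read in __init__, e.g. train_info,
# train_im_dir, patch_size, n_patches_train, ...):
#   from torch.utils.data import DataLoader
#   train_set = IQADataset(args, status='train')
#   train_loader = DataLoader(train_set, batch_size=8, shuffle=True)
#   for patch, patch_graph, cnn_input, label, dis_type in train_loader:
#       pass  # feed the graph/CNN branches of the model here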
|
SuperBruceJia/NLNet-IQA
|
Cross Database Evaluations/data_process/get_data.py
|
get_data.py
|
py
| 6,026 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "lib.make_index.default_loader",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "h5py.File",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "lib.image_process.RandomCropPatches",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "lib.image_process.NonOverlappingCropPatches",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "torch.as_tensor",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "torch.as_tensor",
"line_number": 159,
"usage_type": "call"
}
] |
45805692666
|
from selenium import webdriver
import math
import time
import os
import eel
eel.init('web')
def getUrl(url, volume, chapter):
if url[-1] != '/':
url += '/'
return str(url) + 'v' + str(volume) + '/c' + str(chapter)
def startDriver(url, chapterNumbers, outputFolder='', outputName='output', firstChapter=1, volumeNumber=1):
options = webdriver.FirefoxOptions()
options.add_argument('--disable-blink-features=AutomationControlled')
options.add_argument('--headless')
chapter = firstChapter
url = getUrl(url, volumeNumber, chapter)
print('Запуск парсинга...')
eel.consoleArea('Запуск парсинга...')
for i in range(firstChapter, chapterNumbers + 1):
try:
print(i)
eel.consoleArea(i)
driver = webdriver.Firefox(
executable_path='geckodriver.exe',
options=options
)
driver.get(url=url)
outputSource = 'output/' + outputFolder + '/' + outputName + '_c' + str(chapter) + '.txt'
title = 'Том '+str(volumeNumber)+' глава '+str(chapter)+'\n'
time.sleep(5)
print('Запись файла...')
eel.consoleArea('Запись файла...')
p = driver.find_element_by_xpath("//*[@class='reader-container container container_center']")
t = title + p.text
with open(outputSource, 'w', encoding='utf-8') as file:
file.write(t)
print('[' + str(chapter) + '] глава готова')
eel.consoleArea('[' + str(chapter) + '] глава готова')
url = driver.find_element_by_xpath(
"//*[@class='reader-next__btn button text-truncate button_label button_label_right']").get_attribute(
"href")
chapter += 1
except Exception as ex:
print(ex)
finally:
driver.close()
driver.quit()
# url = 'https://ranobelib.me/sss-class-suicide-hunter'
# chapterNumbers = 1
# outputFolder = 'F:/Project/Python/RanobeLib downloader/output/SSS-Class Suicide Hunter'
# outputName = 'SSS-Class Suicide Hunter'
# firstChapter = 1
# volumeNumber = 1
@eel.expose
def start(url, chapterNumbers, outputFolder, outputName, firstChapter, volumeNumber):
url = str(url)
chapterNumbers = int(chapterNumbers)
outputFolder = str(outputFolder)
outputName = str(outputName)
firstChapter = int(firstChapter)
volumeNumber = int(volumeNumber)
t0 = time.time()
if not os.path.exists('output/' + outputFolder):
os.mkdir('output/' + outputFolder)
outFile = open('output/' + outputFolder + '/outFile.txt', 'w', encoding='utf-8')
startDriver(url, chapterNumbers, outputFolder, outputName, firstChapter, volumeNumber)
t1 = time.time() - t0
print("Time elapsed: ", math.ceil(t1), "second")
eel.consoleArea("Прошло: " + str(math.ceil(t1)) + " секунд" + '\n' + "Глав сохранено: " + str(chapterNumbers))
outFile.write(
"Прошло: " + str(math.ceil(t1)) + " секунд" + '\n' + "Глав сохранено: " + str(chapterNumbers)
)
eel.start('index.html', size=(700, 700))
|
STmihan/RanobeLib-downloader
|
main.py
|
main.py
|
py
| 3,255 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "eel.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.FirefoxOptions",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "eel.consoleArea",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "eel.consoleArea",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "eel.consoleArea",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "eel.consoleArea",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "eel.consoleArea",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "eel.expose",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "eel.start",
"line_number": 86,
"usage_type": "call"
}
] |
22633446316
|
import ply.lex as lex
# List of token names. This is always required
tokens = (
'PROP',
'TRUE',
'FALSE',
'NOT',
'AND',
'OR',
'IMPLY',
'X',
'U',
'F',
'G',
'R',
'LPAREN',
'RPAREN',
)
# Regular expression rules for simple tokens
t_PROP = r'[a-zA-Z_][a-zA-Z0-9_]*'
t_TRUE = r'True'
t_FALSE = r'False'
t_NOT = r'Not'
t_AND = r'And'
t_OR = r'Or'
t_IMPLY = r'Imply'
t_X = r'X'
t_U = r'U'
t_F = r'F'
t_G = r'G'
t_R = r'R'
t_LPAREN = r'\('
t_RPAREN = r'\)'
# A regular expression rule with some action code
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lexer = lex.lex()
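# Quick tokenisation sketch using the standard ply lexer API:
#   lexer.input("Imply ( p ) ( q )")
#   for tok in lexer:
#       print(tok.type, tok.value)
# Caveat (about this token set, not ply itself): string rules are matched
# longest-regex-first, so PROP will also swallow the keywords 'True', 'Not',
# 'And', etc.; the usual ply fix is a reserved-words lookup inside a t_PROP function.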
|
AriRodriguezCruz/mcfgpr
|
parser.py
|
parser.py
|
py
| 1,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "ply.lex.lex",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": 58,
"usage_type": "name"
}
] |
34294677016
|
from django.contrib import admin
from django.urls import path
from final import views
urlpatterns = [
path("", views.index, name="root"),
path("about/", views.about, name="about"),
path("signup/", views.signup, name="signup"),
path("login/", views.loginUser, name="login"),
path("contact/", views.contact, name="contact"),
path("logout/", views.logoutUser, name="logout"),
path("services/", views.services, name="services")
]
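# These app-level routes are normally mounted from the project-level urls.py,
# e.g. (hypothetical project module): path('', include('final.urls'))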
|
supratim531/useless-django-app
|
final/urls.py
|
urls.py
|
py
| 455 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "final.views.index",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "final.views.about",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "final.views.signup",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "final.views.loginUser",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "final.views.contact",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "final.views.logoutUser",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "final.views.services",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "final.views",
"line_number": 12,
"usage_type": "name"
}
] |
31357581621
|
import os
from setuptools import setup, find_packages
from nats import __version__
this_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(this_dir, 'requirements.txt')) as f:
requirements = f.read().splitlines()
setup(
name='nats-client',
version=__version__,
description='NATS client for Python 2',
long_description=
'Tornado based Python client for NATS, a lightweight, high-performance cloud native messaging system',
url='https://github.com/nats-io/nats.py2',
author='Waldemar Quevedo',
author_email='[email protected]',
license='Apache 2.0 License',
packages=['nats', 'nats.io', 'nats.protocol'],
install_requires=requirements,
zip_safe=True,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
])
|
nats-io/nats.py2
|
setup.py
|
setup.py
|
py
| 834 |
python
|
en
|
code
| 62 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "nats.__version__",
"line_number": 11,
"usage_type": "name"
}
] |
7377958665
|
from flask import Flask,request,jsonify
import numpy as np
from scipy.signal import find_peaks
import pandas as pd
app = Flask(__name__)
@app.route('/data_process',methods=['GET'])
def data_process():
dict1={}
    if '[' in request.args['C1 raw'] and ']' in request.args['C1 raw'] and ',' in request.args['C1 raw']:
input_control=request.args['C1 raw'].strip('[').strip(']').split(',')
#print(input_control)
input_test=request.args['T1 raw'].strip('[').strip(']').split(',')
elif ',' in request.args['C1 raw']:
input_control=request.args['C1 raw'].split(',')
print(input_control)
input_test=request.args['T1 raw'].split(',')
elif '\n' in request.args['C1 raw']:
input_control=request.args['C1 raw'].split('\n')
input_control=input_control[0:-2]
#print(input_control)
input_test=request.args['T1 raw'].split('\n')
input_test = input_test[0:-2]
else:
input_control=request.args['C1 raw'].split(' ')
#print(input_control)
input_test=request.args['T1 raw'].split(' ')
    peaks1, _ = find_peaks(np.asarray(input_control, dtype=float), prominence=1,width=15)
    peaks2, _ = find_peaks(np.asarray(input_test, dtype=float), prominence=1,width=15)
if len(peaks1)!=0 or len(peaks2)!=0:
print('Peak1: {} Peak2: {}'.format(peaks1, peaks2))
peak_diff = float(input_control[peaks1[0]]) - float(input_test[peaks2[0]])
percentage_change = (peak_diff) / float(input_control[peaks1[0]]) * 100
if percentage_change < 19:
result = 'Negative'
elif percentage_change >= 19:
result = 'Positive'
dict1['Result']=result
dict1['Peak C1 raw index']=str(peaks1[0])
dict1['Peak T1 raw index']=str(peaks2[0])
dict1['Peak C1 raw']=str(input_control[peaks1[0]])
dict1['Peak T1 raw']=str(input_test[peaks2[0]])
dict1['Peak Difference']=str(peak_diff)
dict1['Percentage Change']=str(percentage_change)
return jsonify(dict1)
else:
dict1['Result']='Inconclusive'
dict1['Peak C1 raw index']='0'
dict1['Peak T1 raw index']='0'
dict1['Peak C1 raw']='0'
dict1['Peak T1 raw']='0'
dict1['Peak Difference']='0'
dict1['Percentage Change']='0'
return jsonify(dict1)
if __name__ == '__main__':
app.run(debug=True)
#http://127.0.0.1:5000/data_process?C1%20raw=[123456]&T1%20raw=[546789]
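# Hedged client-side sketch (values are made up; requests URL-encodes the space
# in the parameter names automatically):
#   import requests
#   r = requests.get('http://127.0.0.1:5000/data_process',
#                    params={'C1 raw': '[1,2,30,2,1]', 'T1 raw': '[1,2,25,2,1]'})
#   print(r.json())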
|
Owaiskhan9654/Pelican-API-for-data-Processing-Canary-Global
|
app.py
|
app.py
|
py
| 2,388 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "scipy.signal.find_peaks",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.signal.find_peaks",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 66,
"usage_type": "call"
}
] |
38903741775
|
#SIEMPRE SIEMPRE SIEMPRE que abrimos un fichero lo tenemos que CERRAR después
#Si abrimos un fichero con with, python hace el close() por nosotros :))
fichero = 'nombres2.txt' #qué tengo que poner como fichero?
#qué pasa si no está en la misma carpeta?
import pickle
import csv
with open(fichero) as f:
print(f) #qué imprime?
with open(fichero) as f: #qué ha pasado?
print(f.read())
with open(fichero,encoding='utf8') as f: #qué ha pasado?
print(f.read())
with open(fichero,encoding='utf8') as f: #por qué sale así? cómo podemos solucionarlo?
for line in f:
print(line.strip())
with open(fichero,encoding='utf8') as f:
lines=f.readlines() #lines=[Rocío,Ana...]
for l in lines:
print(l)
with open(fichero,mode='a',encoding='utf8') as f:
f.write('\nIrene') #probar primero sin \n
lista=[1,2,3,4]
with open('lista.pickle',mode='wb') as f:
pickle.dump(lista, f)
with open('lista.pickle',mode='rb') as f:
print(pickle.load(f))
with open('lista.pickle',mode='rb') as f:
x = pickle.load(f)
x[0]=9
with open('lista.pickle',mode='wb') as f:
pickle.dump(x,f)
with open('lista.pickle',mode='rb') as f:
print(pickle.load(f))
import csv
with open('personas.csv', mode='r') as fichero:
    csv_reader = csv.reader(fichero, delimiter=',')#we store the file's lines in csv_reader
contador = 0
    for linea in csv_reader:#what is a line in a csv?
if contador == 0:
cabeceras = linea
contador += 1
else:
            print('Line ' + str(contador) + ':')
for i in range(len(cabeceras)):
print(cabeceras[i], '->', linea[i])
contador += 1
with open('personas.csv', mode='a', encoding='utf8', newline='') as fichero:
csv_writer = csv.writer(fichero, delimiter=',')
linea = ['Pablo','Buendía','24']
csv_writer.writerow(linea)
linea = ['Enrique', None,'15']
csv_writer.writerow(linea)
import json
diccionario = {
'nombre':'Marta',
'edad':25
}
with open('diccionario.json',mode='w',encoding='utf8') as f:
json.dump(diccionario,f)
with open('diccionario.json',mode='w',encoding='utf8') as f:
json.dump(diccionario,f)
|
rjvillen/clases-particulares-python
|
ficheros/ficheros_ejemplos_sol.py
|
ficheros_ejemplos_sol.py
|
py
| 2,343 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pickle.dump",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 85,
"usage_type": "call"
}
] |
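As a hedged side note to the manual header handling above, the same personas.csv read can be expressed with csv.DictReader, which keys every row by the header names automatically (a sketch, assuming the file's first row is the header):
import csv

with open('personas.csv', mode='r', encoding='utf8') as fichero:
    for fila in csv.DictReader(fichero):
        print(fila)   # each row is a dict keyed by the header names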
74186588989
|
""" Loading logic """
import sqlite3
import sys
from decimal import Decimal
from datetime import datetime
def load_data(database_uri: str, data: list) -> bool:
"""
Loads Bitcoin rate data into an SQLite database.
Args:
database_uri (str): The URI of the SQLite database.
data (list): The list containing the data to load.
Returns:
bool: True if the data was successfully loaded, False otherwise.
"""
schema = {
"database_name": "bitcoin_rate_tracker",
"schema_name": "dbo",
"table": "bitcoin_rates",
"fields": {
"unique_number": "INTEGER PRIMARY KEY AUTOINCREMENT",
"timestamp": "DATETIME2 NOT NULL",
"chart_name": "VARCHAR(10) NOT NULL",
"usd_rate": "DECIMAL(18, 4) NOT NULL",
"gbp_rate": "DECIMAL(18, 4) NOT NULL",
"eur_rate": "DECIMAL(18, 4) NOT NULL",
},
}
try:
# Prepare the data for insertion
formatted_data = [
str(item)
if isinstance(item, Decimal)
else item.isoformat()
if isinstance(item, datetime)
else str(item)
for item in data
]
# Connect to SQLite database
connection = sqlite3.connect(database_uri)
with connection:
cursor = connection.cursor()
# Create table schema
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {schema['table']} (
{', '.join(f'{column} {specification}' for column, specification in schema['fields'].items())},
UNIQUE (timestamp)
);
"""
cursor.execute(create_table_query)
# Insert data into SQLite table
insert_query = f"""
INSERT INTO {schema['table']} ({', '.join(field for field in schema['fields'] if field != 'unique_number')})
VALUES ({', '.join(['?'] * (len(schema['fields']) - 1))})
ON CONFLICT (timestamp) DO UPDATE SET
{', '.join(f"{field} = excluded.{field}" for field in schema['fields'] if field not in ('unique_number', 'timestamp'))}
"""
cursor.execute(insert_query, formatted_data)
connection.commit()
# The maximum timestamp is the timestamp of the last inserted record
max_timestamp = formatted_data[0]
# Delete records older than 48 hours from the maximum timestamp in the SQLite table
delete_query = f"""
DELETE FROM {schema['table']}
WHERE datetime(timestamp) < datetime('{max_timestamp}', '-48 hours')
"""
cursor.execute(delete_query)
connection.commit()
return cursor.lastrowid > 0
except sqlite3.Error as error_msg:
print("Error: " + str(error_msg))
sys.exit()
finally:
connection.close()
|
richardogoma/bitcoin-rate-etl
|
etl/load/loader.py
|
loader.py
|
py
| 2,943 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "decimal.Decimal",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "sqlite3.connect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 83,
"usage_type": "call"
}
] |
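A small, hedged illustration of the ON CONFLICT ... DO UPDATE (upsert) pattern the loader builds dynamically above, with a fixed two-column table so the generated SQL is easier to read (table and values are illustrative; requires SQLite 3.24+):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rates (timestamp TEXT UNIQUE, usd_rate TEXT)")
upsert = (
    "INSERT INTO rates (timestamp, usd_rate) VALUES (?, ?) "
    "ON CONFLICT (timestamp) DO UPDATE SET usd_rate = excluded.usd_rate"
)
conn.execute(upsert, ("2024-01-01T00:00:00", "42000.0"))
conn.execute(upsert, ("2024-01-01T00:00:00", "43000.0"))  # same timestamp: row is updated, not duplicated
print(conn.execute("SELECT * FROM rates").fetchall())
conn.close()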
36345253262
|
"""
EECS 445 - Winter 2017 - Project 2
FER2013 - Skeleton
This file reads the dataset and provides a function `preprocessed_data`
that returns preprocessed images, labels
Usage: python -m data_scripts.fer2013
"""
import numpy as np
from scipy.misc import imresize
from sklearn.utils import resample
import pandas
from utils.config import get, print_if_verbose
class FER2013:
filename = ''
data_stored = False
train_images = np.zeros(0)
train_labels = np.zeros(0)
val_images = np.zeros(0)
val_labels = np.zeros(0)
test_images = np.zeros(0)
test_labels = np.zeros(0)
def __init__(self):
self.filename = get('DATA.FER_PATH')
self.data_stored = False
def get_images_labels(self, matrix):
image_row_len = len(np.fromstring(matrix[0, 1], dtype=int, sep=' '))
image_dim = int(np.sqrt(image_row_len))
# images = np.zeros((matrix.shape[0], image_dim, image_dim))
labels = matrix[:, 0]
images = []
for i in range(matrix.shape[0]):
image_row = np.fromstring(matrix[i, 1], dtype=int, sep=' ')
images.append(np.reshape(image_row, (image_dim, image_dim)))
images = np.array(images)
return images, labels
def read_csv(self):
df = pandas.read_csv(self.filename)
mat = df.as_matrix()
train_mat = mat[mat[:, 2] == 'Training', :]
val_mat = mat[mat[:, 2] == 'PublicTest', :]
test_mat = mat[mat[:, 2] == 'PrivateTest', :]
self.train_images, self.train_labels = self.get_images_labels(
train_mat)
self.val_images, self.val_labels = self.get_images_labels(val_mat)
self.test_images, self.test_labels = self.get_images_labels(test_mat)
self.data_stored = True
def balance_classes(self, images, labels, count=5000):
balanced_images, balanced_labels = [], []
unique_labels = set(labels)
for l in unique_labels:
l_idx = np.where(labels == l)[0]
l_images, l_labels = images[l_idx], labels[l_idx]
# Consistent resampling to facilitate debugging
resampled_images, resampled_labels = resample(l_images,
l_labels,
n_samples=count,
random_state=0)
balanced_images.extend(resampled_images)
balanced_labels.extend(resampled_labels)
balanced_images = np.array(balanced_images)
balanced_labels = np.array(balanced_labels)
print('---Shuffled images shape: {}'.format(balanced_images.shape))
print('---Shuffled labels shape: {}'.format(balanced_labels.shape))
assert(len(balanced_images) == len(balanced_labels))
shuffle_idx = np.random.permutation(len(balanced_images))
return balanced_images[shuffle_idx], balanced_labels[shuffle_idx]
def resize(self, images, new_size=32):
resized = []
for i in range(images.shape[0]):
resized_image = imresize(images[i],
size=(new_size, new_size),
interp='bicubic')
resized.append(resized_image)
return np.array(resized)
def preprocessed_data(self, split, dim=32, one_hot=False, balance_classes=True):
if not self.data_stored:
self.read_csv()
if split == 'train':
print_if_verbose('Loading train data...')
images, labels = self.train_images, self.train_labels
# TODO: Remove blank images
temp_im = [];
temp_lb = [];
for i in range(len(images)):
if(np.linalg.norm(images[i]) >= 1):
temp_im += [images[i]];
temp_lb += [labels[i]];
images = np.array(temp_im);
labels = np.array(temp_lb);
if balance_classes:
images, labels = self.balance_classes(images, labels, 5000)
elif split == 'val':
print_if_verbose('Loading validation data...')
images, labels = self.val_images, self.val_labels
            # TODO: Remove blank images
temp_im = [];
temp_lb = [];
for i in range(len(images)):
if(np.linalg.norm(images[i]) >= 1):
temp_im += [images[i]];
temp_lb += [labels[i]];
images = np.array(temp_im);
labels = np.array(temp_lb);
if balance_classes:
images, labels = self.balance_classes(images, labels, 500)
elif split == 'test':
print_if_verbose('Loading test data...')
images, labels = self.test_images, self.test_labels
# TODO: Remove blank images
temp_im = [];
temp_lb = [];
for i in range(len(images)):
if(np.linalg.norm(images[i]) >= 1):
temp_im += [images[i]];
temp_lb += [labels[i]];
images = np.array(temp_im);
labels = np.array(temp_lb);
if balance_classes:
images, labels = self.balance_classes(images, labels, 500)
else:
print_if_verbose('Invalid input!')
return
images = self.resize(images, dim)
# TODO: Normalize, add dimension, one-hot encoding of labels
temp_im = [];
temp_lb = [];
for i in range(len(images)):
t_im = images[i];
mu = np.mean(t_im);
t_im = np.add(t_im, -mu);
std = np.std(t_im);
t_im = np.divide(t_im, std);
temp_im.append(np.expand_dims(t_im,axis = 2))
if(one_hot):
temp_vec = [0, 0, 0, 0, 0, 0, 0];
temp_vec[labels[i]] = 1
temp_lb += [temp_vec];
images = np.array(temp_im);
if(one_hot):
labels = np.array(temp_lb);
print_if_verbose('---Images shape: {}'.format(images.shape))
print_if_verbose('---Labels shape: {}'.format(labels.shape))
return images, labels
if __name__ == '__main__':
data = FER2013()
images, labels = data.preprocessed_data('train') # 'train' or 'val' or 'test'
|
lixhuang/EECS445p2
|
data_scripts/fer2013.py
|
fer2013.py
|
py
| 6,969 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.config.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.resample",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "scipy.misc.imresize",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "utils.config.print_if_verbose",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "utils.config.print_if_verbose",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "utils.config.print_if_verbose",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "utils.config.print_if_verbose",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.add",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "utils.config.print_if_verbose",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "utils.config.print_if_verbose",
"line_number": 155,
"usage_type": "call"
}
] |
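A compact sketch of the per-image standardization and one-hot encoding that preprocessed_data performs above, run on a dummy batch (the random data is illustrative; the 7 classes follow the FER2013 emotion labels):
import numpy as np

images = np.random.rand(4, 32, 32)        # dummy batch of 4 images
labels = np.array([0, 3, 6, 2])

# Per-image zero mean / unit variance, then append a channel dimension.
norm = [(im - im.mean()) / im.std() for im in images]
norm = np.expand_dims(np.array(norm), axis=3)    # shape (4, 32, 32, 1)

# One-hot encode the 7 emotion classes.
one_hot = np.eye(7)[labels]                      # shape (4, 7)
print(norm.shape, one_hot.shape)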
716717090
|
from plenum.common.constants import SERVICES, VALIDATOR, TARGET_NYM, DATA
from sovrin_common.roles import Roles
from stp_core.network.port_dispenser import genHa
import pytest
from sovrin_client.test.cli.helper import doSendNodeCmd
def testSuspendNode(be, do, trusteeCli, newNodeAdded):
"""
Suspend a node and then cancel suspension. Suspend while suspended
to test that there is no error
"""
newNodeVals = newNodeAdded
be(trusteeCli)
newNodeVals['newNodeData'][SERVICES] = []
doSendNodeCmd(do, newNodeVals)
# Re-suspend node
newNodeVals['newNodeData'][SERVICES] = []
doSendNodeCmd(do, newNodeVals,
expMsgs=['node already has the same data as requested'])
# Cancel suspension
newNodeVals['newNodeData'][SERVICES] = [VALIDATOR]
doSendNodeCmd(do, newNodeVals)
# Re-cancel suspension
newNodeVals['newNodeData'][SERVICES] = [VALIDATOR]
doSendNodeCmd(do, nodeVals=newNodeVals,
expMsgs=['node already has the same data as requested'])
@pytest.mark.skip(reason='INDY-133. Broken compatibility')
def testSuspendNodeWhichWasNeverActive(be, do, trusteeCli, nymAddedOut,
poolNodesStarted, trusteeMap):
"""
Add a node without services field and check that the ledger does not
contain the `services` field and check that it can be blacklisted and
the ledger has `services` as empty list
"""
newStewardSeed = '0000000000000000000KellySteward2'
newStewardIdr = 'DqCx7RFEpSUMZbV2mH89XPH6JT3jMvDNU55NTnBHsQCs'
be(trusteeCli)
do('send NYM dest={{remote}} role={role}'.format(
role=Roles.STEWARD.name),
within=5,
expect=nymAddedOut, mapper={'remote': newStewardIdr})
do('new key with seed {}'.format(newStewardSeed))
nport, cport = (_[1] for _ in genHa(2))
nodeId = '6G9QhQa3HWjRKeRmEvEkLbWWf2t7cw6KLtafzi494G4G'
newNodeVals = {
'newNodeIdr': nodeId,
'newNodeData': {'client_port': cport,
'client_ip': '127.0.0.1',
'alias': 'Node6',
'node_ip': '127.0.0.1',
'node_port': nport
}
}
doSendNodeCmd(do, newNodeVals)
for node in poolNodesStarted.nodes.values():
txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
assert txn[TARGET_NYM] == nodeId
assert SERVICES not in txn[DATA]
do('new key with seed {}'.format(trusteeMap['trusteeSeed']))
newNodeVals['newNodeData'][SERVICES] = []
doSendNodeCmd(do, newNodeVals)
for node in poolNodesStarted.nodes.values():
txn = [t for _, t in node.poolLedger.getAllTxn()][-1]
assert txn[TARGET_NYM] == nodeId
assert SERVICES in txn[DATA] and txn[DATA][SERVICES] == []
|
hyperledger-archives/indy-client
|
sovrin_client/test/cli/test_node_suspension.py
|
test_node_suspension.py
|
py
| 2,820 |
python
|
en
|
code
| 18 |
github-code
|
6
|
[
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sovrin_client.test.cli.helper.doSendNodeCmd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sovrin_client.test.cli.helper.doSendNodeCmd",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.VALIDATOR",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "sovrin_client.test.cli.helper.doSendNodeCmd",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.VALIDATOR",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "sovrin_client.test.cli.helper.doSendNodeCmd",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sovrin_common.roles.Roles.STEWARD",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "sovrin_common.roles.Roles",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "stp_core.network.port_dispenser.genHa",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sovrin_client.test.cli.helper.doSendNodeCmd",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "plenum.common.constants.TARGET_NYM",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.DATA",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "sovrin_client.test.cli.helper.doSendNodeCmd",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "plenum.common.constants.TARGET_NYM",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.SERVICES",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "plenum.common.constants.DATA",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pytest.mark.skip",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 36,
"usage_type": "attribute"
}
] |
21215563455
|
# -*- coding: utf-8 -*-
from django.db.models import Count, Case, When
from django.db.models.functions import TruncDate
from django.utils.translation import gettext as _
from django.utils.translation import get_language
import plotly.offline as plotly
import plotly.graph_objs as go
from core.utils import duration_parts
from reports import utils
def diaperchange_intervals(changes):
"""
Create a graph showing intervals of diaper changes.
:param changes: a QuerySet of Diaper Change instances.
:returns: a tuple of the the graph's html and javascript.
"""
changes = changes.order_by("time")
intervals = []
intervals_solid = []
intervals_wet = []
last_change = changes.first()
for change in changes[1:]:
interval = change.time - last_change.time
if interval.total_seconds() > 0:
intervals.append(interval)
if change.solid:
intervals_solid.append(interval)
if change.wet:
intervals_wet.append(interval)
last_change = change
trace_solid = go.Scatter(
name=_("Solid"),
line=dict(shape="spline"),
x=list(changes.values_list("time", flat=True))[1:],
y=[i.total_seconds() / 3600 for i in intervals_solid],
hoverinfo="text",
text=[_duration_string_hms(i) for i in intervals_solid],
)
trace_wet = go.Scatter(
name=_("Wet"),
line=dict(shape="spline"),
x=list(changes.values_list("time", flat=True))[1:],
y=[i.total_seconds() / 3600 for i in intervals_wet],
hoverinfo="text",
text=[_duration_string_hms(i) for i in intervals_wet],
)
trace_total = go.Scatter(
name=_("Total"),
line=dict(shape="spline"),
x=list(changes.values_list("time", flat=True))[1:],
y=[i.total_seconds() / 3600 for i in intervals],
hoverinfo="text",
text=[_duration_string_hms(i) for i in intervals],
)
layout_args = utils.default_graph_layout_options()
layout_args["barmode"] = "stack"
layout_args["title"] = _("<b>Diaper Change Intervals</b>")
layout_args["xaxis"]["title"] = _("Date")
layout_args["xaxis"]["rangeselector"] = utils.rangeselector_date()
layout_args["yaxis"]["title"] = _("Interval (hours)")
fig = go.Figure(
{
"data": [trace_solid, trace_wet, trace_total],
"layout": go.Layout(**layout_args),
}
)
output = plotly.plot(
fig,
output_type="div",
include_plotlyjs=False,
config={"locale": get_language()},
)
return utils.split_graph_output(output)
def _duration_string_hms(duration):
"""
Format a duration string with hours, minutes and seconds. This is
intended to fit better in smaller spaces on a graph.
:returns: a string of the form Xm.
"""
h, m, s = duration_parts(duration)
return "{}h{}m{}s".format(h, m, s)
|
babybuddy/babybuddy
|
reports/graphs/diaperchange_intervals.py
|
diaperchange_intervals.py
|
py
| 2,953 |
python
|
en
|
code
| 1,766 |
github-code
|
6
|
[
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Scatter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "reports.utils.default_graph_layout_options",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "reports.utils",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "reports.utils.rangeselector_date",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "reports.utils",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Figure",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Layout",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "plotly.offline.plot",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "plotly.offline",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.get_language",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "reports.utils.split_graph_output",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "reports.utils",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "core.utils.duration_parts",
"line_number": 92,
"usage_type": "call"
}
] |
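A minimal hedged sketch of the plotly pattern used above — build a Scatter trace, wrap it in a Figure, and render it to an embeddable HTML div string (the data points and title are placeholders):
import plotly.offline as plotly
import plotly.graph_objs as go

trace = go.Scatter(
    name="Total",
    line=dict(shape="spline"),
    x=[1, 2, 3],
    y=[4.0, 2.5, 6.0],
)
fig = go.Figure({"data": [trace], "layout": go.Layout(title="Example")})
html_div = plotly.plot(fig, output_type="div", include_plotlyjs=False)
print(html_div[:60])  # '<div>...' ready to embed in a template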
8631506734
|
#!/usr/bin/env python3
import argparse
import logging
import requests
import sys
from alert_autoconf.config import read_from_file
from json import JSONDecodeError
LOG_LEVEL = "DEBUG"
def parse_params() -> dict:
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
"-u",
"--url",
default="localhost",
help="Graphite system render url",
required=False,
)
parser.add_argument(
"-c", "--config", help="Path to trigger description", required=True
)
parser.add_argument(
"-l", "--log-level", default=LOG_LEVEL, help="Log level.", required=False
)
parser.add_argument(
"-C", "--cluster",
help="Cluster name. If specified, {cluster} will be replaced with this name.",
required=False,
)
return parser.parse_args()
def main():
frmt = "[%(asctime)s] %(levelname)s:%(name)s:%(funcName)s:%(message)s"
logging.basicConfig(
level=logging.getLevelName(LOG_LEVEL),
stream=sys.stdout,
format=frmt,
datefmt="%Y-%m-%d %H:%M:%S",
)
_logger = logging.getLogger("validate")
params = parse_params()
log_level = params.log_level.upper()
if LOG_LEVEL != log_level:
logging.getLogger().setLevel(logging.getLevelName(log_level))
_logger.setLevel(logging.getLevelName(log_level))
is_valid = True
data = read_from_file(params.config, params.cluster)
for trigger in data.triggers:
for n, target in enumerate(trigger.targets):
request_params = {
"format": "json",
"target": target,
"from": "-1min",
"noNullPoints": "true",
"maxDataPoints": 1,
}
try:
r = requests.get(params.url, params=request_params)
r.json()
_logger.info('Trigger: "%s", target: "%s" OK' % (trigger.name, n))
except JSONDecodeError as e:
is_valid = False
_logger.error(
'Trigger: "%s", target: "%s" ERROR: %s' % (trigger.name, n, r.text)
)
except Exception as e:
is_valid = False
_logger.error(
'Trigger: "%s", target: "%s", exception: "%s"'
% (trigger.name, n, e)
)
if not is_valid:
sys.exit(1)
if __name__ == "__main__":
main()
|
avito-tech/alert-autoconf
|
bin/validate.py
|
validate.py
|
py
| 2,475 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "logging.getLevelName",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "alert_autoconf.config.read_from_file",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 85,
"usage_type": "call"
}
] |
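A stripped-down hedged sketch of the validation idea above — request one render target and treat a JSON decode failure as an invalid target (URL and target string are placeholders):
import requests
from json import JSONDecodeError

def target_is_valid(render_url, target):
    # Return True if the Graphite render endpoint answers with parseable JSON.
    params = {"format": "json", "target": target, "from": "-1min", "maxDataPoints": 1}
    try:
        requests.get(render_url, params=params).json()
        return True
    except (JSONDecodeError, requests.RequestException):
        return False

# Example call (placeholder URL and target):
# print(target_is_valid("http://localhost/render", "sumSeries(app.requests.count)"))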
19238997232
|
from collections import deque
N = int(input())
def bfs():
cnt = -1
q = deque()
for i in range(10):
q.append(i)
while q:
s = q.popleft()
cnt += 1
if cnt == N:
print(s)
break
for i in range(s%10):
q.append(s * 10 + i)
else:
print(-1)
bfs()
|
sdh98429/dj2_alg_study
|
BAEKJOON/backtracking/b1038.py
|
b1038.py
|
py
| 344 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
}
] |
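For contrast with the BFS above, a hedged alternative (not the original solution) that enumerates the same strictly-decreasing-digit numbers via itertools.combinations: every such number is a non-empty subset of the digits 0-9 written in descending order.
from itertools import combinations

decreasing = sorted(
    int("".join(map(str, sorted(combo, reverse=True))))
    for r in range(1, 11)
    for combo in combinations(range(10), r)
)

N = 15
print(decreasing[N] if N < len(decreasing) else -1)   # prints 32, matching the BFS ordering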
21884520657
|
"""Use simple rate comparisions, try predicting event rates."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import Survey
from tests.convenience import plot_aa_style, rel_path
ALPHAS = np.around(np.linspace(-0.2, -2.5, 7), decimals=2)
SURVEYS = ('parkes-htru', 'arecibo-palfa', 'askap-fly', 'fast-crafts')
def compare_surveys(surv1, surv2, alpha):
"""Event rate surv1 / Event rate surv2 for an alpha."""
omega = surv1.beam_size_at_fwhm/surv2.beam_size_at_fwhm
T_rec = surv1.T_rec/surv2.T_rec
gain = surv1.gain/surv2.gain
beta = surv1.beta/surv2.beta
SEFD = T_rec*beta/gain
bw = surv1.bw/surv2.bw
S_min = surv1.snr_limit/surv2.snr_limit
return omega * (SEFD * S_min)**alpha * (bw)**(-alpha/2)
def analytical_rates(surveys=SURVEYS, alphas=ALPHAS):
"""Use a analytical model to scale detection rates to various alphas."""
rates = {}
for surv in surveys:
# Get survey parameters
surv1 = Survey(surv)
surv1.set_beam('perfect', n_sidelobes=0.5)
surv2 = Survey(surveys[0])
surv2.set_beam('perfect', n_sidelobes=0.5)
# Calculate rate per alpha
rate = []
for alpha in alphas:
rate.append(compare_surveys(surv1, surv2, alpha))
rates[surv] = rate
return rates
def main():
"""Plot analytical rates."""
rates = analytical_rates()
for surv in rates:
rate = rates[surv]
plot_aa_style()
plt.plot(ALPHAS, rate, label=surv)
plt.xlabel(r'$\alpha_{\text{in}}$')
plt.ylabel(rf'Events / {SURVEYS[0]}')
plt.xlim((min(ALPHAS), max(ALPHAS)))
plt.yscale('log')
plt.legend()
plt.gca().invert_xaxis()
plt.grid(True)
plt.tight_layout()
plt.savefig(rel_path('./plots/rates_analytical.pdf'))
if __name__ == '__main__':
main()
|
TRASAL/frbpoppy
|
tests/rates/alpha_analytical.py
|
alpha_analytical.py
|
py
| 1,884 |
python
|
en
|
code
| 26 |
github-code
|
6
|
[
{
"api_name": "numpy.around",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "frbpoppy.Survey",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "frbpoppy.Survey",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tests.convenience.plot_aa_style",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "tests.convenience.rel_path",
"line_number": 66,
"usage_type": "call"
}
] |
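A tiny hedged sketch isolating the analytical scaling relation used in compare_surveys above, with unit parameter ratios so the expected output is obviously 1.0 (the numbers are placeholders, not real survey parameters):
import numpy as np

def rate_ratio(omega, sefd, s_min, bw, alpha):
    # Event-rate ratio between two surveys, same form as compare_surveys.
    return omega * (sefd * s_min) ** alpha * bw ** (-alpha / 2)

alphas = np.around(np.linspace(-0.2, -2.5, 7), decimals=2)
print([rate_ratio(1.0, 1.0, 1.0, 1.0, a) for a in alphas])   # all 1.0 for identical surveys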
10790083831
|
import html
import os
import re
import requests
class DirFile:
def __init__(self, shop_data_dict, img_list, path, shop):
self.shop_data_dict = shop_data_dict
self.img_list = img_list
self.path = path
self.shop = shop
def save_to_local(self):
try:
folder_name = self.shop_data_dict["title"]
folder_name = folder_name.replace(" ", "")
folder = self.path + "/" + self.shop + "-" + folder_name
print("===================================================================")
if not os.path.exists(folder):
os.makedirs(folder)
print("파일생성")
else:
print("파일 존재")
except OSError:
print('Error : Creating folder with ' + folder)
except Exception as e:
            print('Exception error occurred!')
print(str(e))
else:
txt_file_location = folder + "/" + folder_name + ".txt"
cleanr = re.compile("<.*?>")
print(txt_file_location)
            # Save the txt file
with open(txt_file_location, "w", encoding="utf-8") as txtFile:
for shop_data in self.shop_data_dict.values():
txtFile.write(html.unescape(re.sub(cleanr, "", str(shop_data).replace("</p>", "\n").replace("<br />", "\n"))) + "\n\n")
print("txt 파일 저장 완료")
# 이미지 저장
key = 0
for img_file in self.img_list:
img_file_name = folder + "/" + folder_name + '_' + str(key + 1) + '.jpg'
r = requests.get(img_file)
file = open(img_file_name, "wb")
file.write(r.content)
file.close()
key += 1
print("이미지 저장 완료")
print("===================================================================")
|
kimbackdoo/Hareubang-Crawler
|
crawl/dir_file.py
|
dir_file.py
|
py
| 1,955 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "html.unescape",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
}
] |
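A hedged, minimal sketch of the image-download step inside save_to_local above, isolated from the folder bookkeeping (URL and destination name are placeholders):
import requests

def download_image(url, dest_path):
    # Fetch an image URL and write the raw bytes to dest_path.
    response = requests.get(url)
    response.raise_for_status()
    with open(dest_path, "wb") as fh:
        fh.write(response.content)

# Example call (placeholder URL):
# download_image("https://example.com/shop_1.jpg", "shop_1.jpg")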
33781322730
|
import copy
import datetime as dt
import decimal
import re
import pytest
import pytz
from google.cloud import bigquery
from google.cloud import bigquery_storage_v1beta1
from google.protobuf import timestamp_pb2
def _to_bq_table_ref(proto_table_ref, partition_suffix=""):
"""Converts protobuf table reference to bigquery table reference.
Args:
proto_table_ref (bigquery_storage_v1beta1.types.TableReference):
A protobuf reference to a table.
partition_suffix (str):
An optional suffix to append to the table_id, useful for selecting
partitions of ingestion-time partitioned tables.
Returns:
google.cloud.bigquery.table.TableReference
"""
return bigquery.table.TableReference.from_api_repr(
{
"projectId": proto_table_ref.project_id,
"datasetId": proto_table_ref.dataset_id,
"tableId": proto_table_ref.table_id + partition_suffix,
}
)
@pytest.mark.parametrize(
"data_format,expected_schema_type",
(
(None, "avro_schema"), # Default format (Avro).
(bigquery_storage_v1beta1.enums.DataFormat.AVRO, "avro_schema"),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW, "arrow_schema"),
),
)
def test_read_rows_as_blocks_full_table(
client, project_id, small_table_reference, data_format, expected_schema_type
):
session = client.create_read_session(
small_table_reference,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
schema_type = session.WhichOneof("schema")
assert schema_type == expected_schema_type
blocks = list(client.read_rows(stream_pos))
assert len(blocks) > 0
block = blocks[0]
assert block.status.estimated_row_count > 0
@pytest.mark.parametrize(
"data_format,expected_schema_type",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO, "avro_schema"),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW, "arrow_schema"),
),
)
def test_read_rows_as_rows_full_table(
client, project_id, small_table_reference, data_format, expected_schema_type
):
session = client.create_read_session(
small_table_reference,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
assert len(rows) > 0
@pytest.mark.parametrize(
"data_format",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW),
),
)
def test_basic_nonfiltered_read(client, project_id, table_with_data_ref, data_format):
session = client.create_read_session(
table_with_data_ref,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
assert len(rows) == 5 # all table rows
def test_filtered_rows_read(client, project_id, table_with_data_ref):
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.row_restriction = "age >= 50"
session = client.create_read_session(
table_with_data_ref,
"projects/{}".format(project_id),
format_=bigquery_storage_v1beta1.enums.DataFormat.AVRO,
requested_streams=1,
read_options=read_options,
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
assert len(rows) == 2
@pytest.mark.parametrize(
"data_format",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW),
),
)
def test_column_selection_read(client, project_id, table_with_data_ref, data_format):
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.selected_fields.append("first_name")
read_options.selected_fields.append("age")
session = client.create_read_session(
table_with_data_ref,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
read_options=read_options,
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
for row in rows:
assert sorted(row.keys()) == ["age", "first_name"]
def test_snapshot(client, project_id, table_with_data_ref, bq_client):
before_new_data = timestamp_pb2.Timestamp()
before_new_data.GetCurrentTime()
# load additional data into the table
new_data = [
{u"first_name": u"NewGuyFoo", u"last_name": u"Smith", u"age": 46},
{u"first_name": u"NewGuyBar", u"last_name": u"Jones", u"age": 30},
]
destination = _to_bq_table_ref(table_with_data_ref)
bq_client.load_table_from_json(new_data, destination).result()
# read data using the timestamp before the additional data load
session = client.create_read_session(
table_with_data_ref,
"projects/{}".format(project_id),
format_=bigquery_storage_v1beta1.enums.DataFormat.AVRO,
requested_streams=1,
table_modifiers={"snapshot_time": before_new_data},
)
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
# verify that only the data before the timestamp was returned
assert len(rows) == 5 # all initial records
for row in rows:
assert "NewGuy" not in row["first_name"] # no new records
def test_column_partitioned_table(
client, project_id, col_partition_table_ref, bq_client
):
data = [
{"description": "Tracking established.", "occurred": "2017-02-15"},
{"description": "Look, a solar eclipse!", "occurred": "2018-02-15"},
{"description": "Fake solar eclipse reported.", "occurred": "2018-02-15"},
{"description": "1 day after false eclipse report.", "occurred": "2018-02-16"},
{"description": "1 year after false eclipse report.", "occurred": "2019-02-15"},
]
destination = _to_bq_table_ref(col_partition_table_ref)
bq_client.load_table_from_json(data, destination).result()
# Read from the table with a partition filter specified, and verify that
# only the expected data is returned.
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.row_restriction = "occurred = '2018-02-15'"
session = client.create_read_session(
col_partition_table_ref,
"projects/{}".format(project_id),
format_=bigquery_storage_v1beta1.enums.DataFormat.AVRO,
requested_streams=1,
read_options=read_options,
)
assert session.streams # there should be some data to fetch
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
assert len(rows) == 2
expected_descriptions = ("Look, a solar eclipse!", "Fake solar eclipse reported.")
for row in rows:
assert row["occurred"] == dt.date(2018, 2, 15)
assert row["description"] in expected_descriptions
@pytest.mark.parametrize(
"data_format",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW),
),
)
def test_ingestion_time_partitioned_table(
client, project_id, ingest_partition_table_ref, bq_client, data_format
):
data = [{"shape": "cigar", "altitude": 1200}, {"shape": "disc", "altitude": 750}]
destination = _to_bq_table_ref(
ingest_partition_table_ref, partition_suffix="$20190809"
)
bq_client.load_table_from_json(data, destination).result()
data = [
{"shape": "sphere", "altitude": 3500},
{"shape": "doughnut", "altitude": 100},
]
destination = _to_bq_table_ref(
ingest_partition_table_ref, partition_suffix="$20190810"
)
bq_client.load_table_from_json(data, destination).result()
data = [
{"shape": "elephant", "altitude": 1},
{"shape": "rocket", "altitude": 12700},
]
destination = _to_bq_table_ref(
ingest_partition_table_ref, partition_suffix="$20190811"
)
bq_client.load_table_from_json(data, destination).result()
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.row_restriction = "DATE(_PARTITIONTIME) = '2019-08-10'"
session = client.create_read_session(
ingest_partition_table_ref,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
read_options=read_options,
)
assert session.streams # there should be some data to fetch
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
assert len(rows) == 2
actual_items = {(row["shape"], row["altitude"]) for row in rows}
expected_items = {("sphere", 3500), ("doughnut", 100)}
assert actual_items == expected_items
@pytest.mark.parametrize(
"data_format",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW),
),
)
def test_decoding_data_types(
client, project_id, all_types_table_ref, bq_client, data_format
):
data = [
{
u"string_field": u"Price: € 9.95.",
u"bytes_field": bigquery._helpers._bytes_to_json(b"byteees"),
u"int64_field": -1085,
u"float64_field": -42.195,
u"numeric_field": "1.4142",
u"bool_field": True,
u"geography_field": '{"type": "Point", "coordinates": [-49.3028, 69.0622]}',
u"person_struct_field": {u"name": u"John", u"age": 42},
u"timestamp_field": 1565357902.017896, # 2019-08-09T13:38:22.017896
u"date_field": u"1995-03-17",
u"time_field": u"16:24:51",
u"datetime_field": u"2005-10-26T19:49:41",
u"string_array_field": [u"foo", u"bar", u"baz"],
}
]
# Explicit schema is needed to recognize bytes_field as BYTES, and not STRING.
# Since partial schemas are not supported in load_table_from_json(), a full
# schema needs to be specified.
schema = [
bigquery.SchemaField("string_field", "STRING"),
bigquery.SchemaField("bytes_field", "BYTES"),
bigquery.SchemaField("int64_field", "INT64"),
bigquery.SchemaField("float64_field", "FLOAT64"),
bigquery.SchemaField("numeric_field", "NUMERIC"),
bigquery.SchemaField("bool_field", "BOOL"),
bigquery.SchemaField("geography_field", "GEOGRAPHY"),
bigquery.SchemaField(
"person_struct_field",
"STRUCT",
fields=(
bigquery.SchemaField("name", "STRING"),
bigquery.SchemaField("age", "INT64"),
),
),
bigquery.SchemaField("timestamp_field", "TIMESTAMP"),
bigquery.SchemaField("date_field", "DATE"),
bigquery.SchemaField("time_field", "TIME"),
bigquery.SchemaField("datetime_field", "DATETIME"),
bigquery.SchemaField("string_array_field", "STRING", mode="REPEATED"),
]
job_config = bigquery.LoadJobConfig(schema=schema)
destination = _to_bq_table_ref(all_types_table_ref)
bq_client.load_table_from_json(data, destination, job_config=job_config).result()
session = client.create_read_session(
all_types_table_ref,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
)
assert session.streams # there should be data available
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=session.streams[0]
)
rows = list(client.read_rows(stream_pos).rows(session))
expected_result = {
u"string_field": u"Price: € 9.95.",
u"bytes_field": b"byteees",
u"int64_field": -1085,
u"float64_field": -42.195,
u"numeric_field": decimal.Decimal("1.4142"),
u"bool_field": True,
u"geography_field": "POINT(-49.3028 69.0622)",
u"person_struct_field": {u"name": u"John", u"age": 42},
u"timestamp_field": dt.datetime(2019, 8, 9, 13, 38, 22, 17896, tzinfo=pytz.UTC),
u"date_field": dt.date(1995, 3, 17),
u"time_field": dt.time(16, 24, 51),
u"string_array_field": [u"foo", u"bar", u"baz"],
}
result_copy = copy.copy(rows[0])
del result_copy["datetime_field"]
assert result_copy == expected_result
# Compare datetime separately, AVRO and PYARROW return different object types,
# although they should both represent the same value.
# TODO: when fixed, change assertion to assert a datetime instance!
expected_pattern = re.compile(r"2005-10-26( |T)19:49:41")
assert expected_pattern.match(str(rows[0]["datetime_field"]))
@pytest.mark.parametrize(
"data_format",
(
(bigquery_storage_v1beta1.enums.DataFormat.AVRO),
(bigquery_storage_v1beta1.enums.DataFormat.ARROW),
),
)
def test_resuming_read_from_offset(client, project_id, data_format):
shakespeare_ref = bigquery_storage_v1beta1.types.TableReference()
shakespeare_ref.project_id = project_id
shakespeare_ref.dataset_id = "public_samples_copy"
shakespeare_ref.table_id = "shakespeare"
read_session = client.create_read_session(
shakespeare_ref,
"projects/{}".format(project_id),
format_=data_format,
requested_streams=1,
)
assert read_session.streams # there should be data available
stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=read_session.streams[0], offset=0
)
read_rows_stream = client.read_rows(stream_pos)
# fetch the first two batches of rows
rows_iter = iter(read_rows_stream)
some_rows = next(rows_iter)
more_rows = next(rows_iter)
# fetch the rest of the rows using the stream offset
new_stream_pos = bigquery_storage_v1beta1.types.StreamPosition(
stream=read_session.streams[0], offset=some_rows.row_count + more_rows.row_count
)
remaining_rows_count = sum(
1 for _ in client.read_rows(new_stream_pos).rows(read_session)
)
# verify that the counts match
expected_len = 164656 # total rows in shakespeare table
actual_len = remaining_rows_count + some_rows.row_count + more_rows.row_count
assert actual_len == expected_len
|
silverdev/google-cloud-python
|
bigquery_storage/tests/system/test_reader.py
|
test_reader.py
|
py
| 15,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "google.cloud.bigquery.table.TableReference.from_api_repr",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery.table",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.TableReadOptions",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.TableReadOptions",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "google.protobuf.timestamp_pb2.Timestamp",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "google.protobuf.timestamp_pb2",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.TableReadOptions",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.TableReadOptions",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery._helpers._bytes_to_json",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery._helpers",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 329,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.SchemaField",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery.LoadJobConfig",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 364,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "pytz.UTC",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "datetime.time",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.TableReference",
"line_number": 404,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 404,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 404,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 418,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types.StreamPosition",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.types",
"line_number": 429,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 429,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1.enums",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "google.cloud.bigquery_storage_v1beta1",
"line_number": 400,
"usage_type": "name"
}
] |
19692262120
|
#Socratica Video 22 List Comprehension
#list comprehension [expression for variable in collection if test1 and test2]
#list comprehension [expression for variable1 in collection1 and variable2 in collection2]
squares = []
for i in range(1, 11):
squares.append(i**2)
print(squares) #print [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
squares2 = [i**2 for i in range(1,11)]
print(squares2) #print [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
movies = ["Star Wars", "Gandhi", "Casablanca", "Shawshank Redemption", "Toy Story" "Gone With The Wind", "Citizen Kane", "It's A Wonderful Life", "The Wizard Of Oz", "Gattaca", "Rear Window", "Ghostbusters", "To Kill A Mockingbird", "Good Will Hunting", "2001: A Space Odyssey", "Raiders Of The Lost Ark",
"Groundhog Day", "Close Encounters Of The Third Kind"]
gmovies = []
for title in movies:
if title.startswith("G"):
gmovies.append(title)
print(gmovies) #print ['Gandhi', 'Gattaca', 'Ghostbusters', 'Good Will Hunting', 'Groundhog Day']
gmovies2 = [title for title in movies if title.startswith("G")]
print(gmovies2) #print ['Gandhi', 'Gattaca', 'Ghostbusters', 'Good Will Hunting', 'Groundhog Day']
movies2 = [("Citizen Kane", 1941), ("Spirited Away", 2001), ("It's A Wonderful Life", 1946), ("Gattaca", 1997), ("No Country For Old Men", 2007), ("Rear Window", 1954), ("The Lord Of The Rings: The Fellowship Of The Ring", 2001), ("Groundhog Day", 1993), ("Close Encounters Of The Third Kind", 1977),
("The Royal Tenenbaums", 2001), ("The Aviator", 2004), ("Raiders Of The Lost Ark", 1981)]
movies2001 = []
for key, value in movies2:
if value > 2000:
movies2001.append(key)
print(movies2001) #print ['Spirited Away', 'No Country For Old Men', 'The Lord Of The Rings: The Fellowship Of The Ring', 'The Royal Tenenbaums', 'The Aviator']
movies20012 = [title for (title, year) in movies2 if year > 2000]
print(movies20012) #print ['Spirited Away', 'No Country For Old Men', 'The Lord Of The Rings: The Fellowship Of The Ring', 'The Royal Tenenbaums', 'The Aviator']
vectorlist = [2, -3, 1]
print(4*vectorlist) #print [2, -3, 1, 2, -3, 1, 2, -3, 1, 2, -3, 1] or vectorlist+vectorlist+vectorlist+vectorlist. Adding lists concatenates them.
wvectorlist = [4*x for x in vectorlist]
print(wvectorlist) #print [8, -12, 4]
#Cartesian product a={1,3} b={x,y} a*b={(1,x), (1,y), (3,x), (3,y)}
oddlist = [1, 3, 5, 7]
evenlist = [2, 4, 6, 8]
cartesianproduct = [(a,b) for a in oddlist for b in evenlist]
print(cartesianproduct) #print [(1, 2), (1, 4), (1, 6), (1, 8), (3, 2), (3, 4), (3, 6), (3, 8), (5, 2), (5, 4), (5, 6), (5, 8), (7, 2), (7, 4), (7, 6), (7, 8)]
cartesianproductproduct = [a*b for a in oddlist for b in evenlist]
print(cartesianproductproduct) #print [2, 4, 6, 8, 6, 12, 18, 24, 10, 20, 30, 40, 14, 28, 42, 56]
print(max(cartesianproductproduct)) #print 56
list1 = [1]
list2 = [2, 3]
list3 = [4, 5, 6]
list123 = [a*b*c for a in list1 for b in list2 for c in list3]
print(list123) #print [8, 10, 12, 12, 15, 18]
listall = [[1], [2, 3], [4, 5, 6]]
listallproduct = [a for a in listall]
print(listallproduct) #print [[1], [2, 3], [4, 5, 6]]
#RM 018maximumpathsumi.py Project Euler
# lista = [3]
# listb = [7, 4]
# listc = [2, 4, 6]
# listd = [8, 5, 9, 3]
# listabcd = [print(a, b, c, d) for a in lista for b in listb for c in listc for d in listd if a+b+c+d==23] #print 3 7 6 9 which is incorrect. Want 3 7 4 9
#https://www.pythonforbeginners.com/basics/list-comprehensions-in-python
#result = [transform for iteration if filter]
result = [evennumber for evennumber in range(0,6) if evennumber % 2 == 0]
print(result) #print [0, 2, 4]
x = [i for i in range(0,11)]
print(x) #print [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
listofwords = ["this","is","a","list","of","words"]
firstletters = [eachlistofwords[0] for eachlistofwords in listofwords]
print(firstletters) #print ['t', 'i', 'a', 'l', 'o', 'w']
print(x.upper() for x in ["a","b","c"]) #print <generator object <genexpr> at 0x7facced5c360>
print([x.upper() for x in ["a","b","c"]]) #print ['A', 'B', 'C']
extractnumbersinstring = "Hello 12345 World"
numbersasstring = [x for x in extractnumbersinstring if x.isdigit()]
print(numbersasstring) #print ['1', '2', '3', '4', '5']
stringasstring = [x for x in extractnumbersinstring if x.isalpha()]
print(stringasstring) #print ['H', 'e', 'l', 'l', 'o', 'W', 'o', 'r', 'l', 'd']
def double(x):
return x*2
print([double(x) for x in range(0,11)]) #print [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
print([double(x) for x in range(0,11) if x % 2 == 0]) #print [0, 4, 8, 12, 16, 20]
#https://hackernoon.com/list-comprehension-in-python-8895a785550b
lista = [1, 2, 3, 4]
listb = [2, 3, 4, 5]
matchnumbers = [a for a in lista for b in listb if a == b]
print(matchnumbers) #print [2, 3, 4]
#https://docs.python.org/3.5/tutorial/datastructures.html
listx = [1, 2, 3]
listy = [3, 1, 4]
xandytuple = [(x,y) for x in listx for y in listy if x!= y]
print(xandytuple) #print [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)]
#long way
xandytuple = []
for x in listx:
for y in listy:
if x != y:
xandytuple.append((x,y))
print(xandytuple) #print [(1, 3), (1, 4), (2, 3), (2, 1), (2, 4), (3, 1), (3, 4)]
vector = [-4, -2, 0, 2, 4]
absolutevector = [abs(eachvector) for eachvector in vector]
print(absolutevector) #print [4, 2, 0, 2, 4]
from math import pi
pinumbers = [str(round(pi, i)) for i in range(1,7)]
print(pinumbers) #print ['3.1', '3.14', '3.142', '3.1416', '3.14159', '3.141593']
#continue 5.1.4 Nested List Comprehensions 07/17/18
a1 = [[3, 0, 0], [7, 4, 0], [2, 4, 6]]
transpose = [[row[i] for row in a1] for i in range(0,3)]
print(transpose) #print [[3, 7, 2], [0, 4, 4], [0, 0, 6]]
transpose = [[row[i] for row in a1 for i in range(0,3)]]
print(transpose) #print [[3, 0, 0, 7, 4, 0, 2, 4, 6]]
#3x4 matrix 3 lists length 4
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
onelist = []
for i in range(0,3):
for j in range(0,3):
        print(matrix[i][j]) #print 1\n 2\n 3\n 5\n 6\n 7\n 9\n 10\n 11
onelist.append(matrix[i][j])
print(onelist) #print [1, 2, 3, 5, 6, 7, 9, 10, 11]
print([row[i] for row in matrix for i in range(0,3)]) #print [1, 2, 3, 5, 6, 7, 9, 10, 11]
#3x4 matrix 3 lists length 4
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
onelist = []
for i in range(0,3):
for j in range(0,4):
print(matrix[i][j]) #print 1\n 2\n 3\n 4\n 5\n 6\n 7\n 8\n 9\n 10\n 11\n 12
onelist.append(matrix[i][j])
print(onelist) #print [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
print([row[i] for row in matrix for i in range(0,4)]) #print [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
#3x4 matrix 3 lists length 4
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
transposed = []
for i in range(0,3):
transposedrow = []
for eachlist in matrix:
print(eachlist[i]) #print 1\n 5\n 9\n 2\n 6\n 10\n 3\n 7\n 11
transposedrow.append(eachlist[i])
transposed.append(transposedrow)
print(transposed) #print [[1, 5, 9], [2, 6, 10], [3, 7, 11]]
print([[row[i] for row in matrix] for i in range(0,3)]) #print [[1, 5, 9], [2, 6, 10], [3, 7, 11]]
#3x4 matrix 3 lists length 4
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
transposed = []
for i in range(0,4):
transposedrow = []
for eachlist in matrix:
        print(eachlist[i]) #print 1\n 5\n 9\n 2\n 6\n 10\n 3\n 7\n 11\n 4\n 8\n 12
transposedrow.append(eachlist[i])
transposed.append(transposedrow)
print(transposed) #print [[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]
print([[row[i] for row in matrix] for i in range(0,4)]) #print [[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]
#zip() function creates a list of tuples
matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
print(list(zip(*matrix))) #print [(1, 5, 9), (2, 6, 10), (3, 7, 11), (4, 8, 12)]
print(zip(*matrix)) #print <zip object at 0x7ff55d585f88>
letterhundreds = [("a",232), ("b",343), ("c", 543), ("d",23)]
print(letterhundreds) #print [('a', 232), ('b', 343), ('c', 543), ('d', 23)]
pivotletterhundreds = list(zip(*letterhundreds))
print(pivotletterhundreds) #print [('a', 'b', 'c', 'd'), (232, 343, 543, 23)]
print(pivotletterhundreds[0]) #print ('a', 'b', 'c', 'd')
print(list(pivotletterhundreds[0])) #print ['a', 'b', 'c', 'd']
#Python Implementing _zip_ with list comprehensions — Reuven Lerner.pdf https://lerner.co.il/2016/08/30/implementing-zip-list-comprehensions/
#Python 2’s “zip” returns a list, but Python 3’s “zip” returns an iterator object or <zip object at . . . >
#Use zip for string and tuples
sletters = "abcde"
tnumbers = (1, 2, 3, 4, 5)
tnumbersoneless = (1, 2, 3, 4)
umore = ("jack","sack","tack","back","pack")
print(zip(sletters, tnumbers)) #print <zip object at 0x7f66c7048b08>
print(list(zip(sletters, tnumbers))) #print [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
print(list(zip(sletters, tnumbersoneless))) #print [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
print(list(zip(sletters, tnumbers, umore))) #print [('a', 1, 'jack'), ('b', 2, 'sack'), ('c', 3, 'tack'), ('d', 4, 'back'), ('e', 5, 'pack')]
#Lists to dictionaries
names = ["Tom", "Dick", "Harry"]
ages = [50, 35, 60]
print(dict(zip(names, ages))) #print {'Tom': 50, 'Dick': 35, 'Harry': 60}
from itertools import repeat
def zip_longest(*args, fillvalue="put fillvalue here"):
# zip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
iterators = [iter(it) for it in args]
num_active = len(iterators)
if not num_active:
return
while True:
values = []
for i, it in enumerate(iterators):
try:
value = next(it)
except StopIteration:
num_active -= 1
if not num_active:
return
iterators[i] = repeat(fillvalue) #repeat() function is from the itertools module
value = fillvalue
values.append(value)
yield tuple(values)
print(zip_longest(sletters, tnumbersoneless)) #print <generator object zip_longest at 0x7fe1faac9a98>
print(zip(zip_longest(sletters, tnumbersoneless))) #print <zip object at 0x7f6a2b8e64c8>
print(list(zip_longest(sletters, tnumbersoneless))) #print [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 'put fillvalue here')]
#Zip in list comprehension
sletterstnumbers = [(sletters[i], tnumbers[i]) for i in range(0,len(sletters))]
print(sletterstnumbers) #print [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
#Author wanted to create a function to clean things up
def shortest_sequence_range(*args):
return range(len(sorted(args, key=len)[0]))
print([(sletters[i], tnumbers[i]) for i in shortest_sequence_range(sletters, tnumbers)]) #print [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
|
raymondmar61/pythonoutsidethebook
|
listcomprehension.py
|
listcomprehension.py
|
py
| 10,585 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.pi",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "itertools.repeat",
"line_number": 199,
"usage_type": "call"
}
] |
73268001788
|
# Uncomment the next two lines to enable the admin:
from django.conf.urls import patterns, include, url
from productes import views
urlpatterns = patterns('',
url(r'^$', views.llistarProductes, name='llistarProductes'),
url(r'^llistarCategories/$', views.llistarCategories, name='llistarCategories'),
url(r'^llistarCategoriesAjax/$', views.llistarCategoriesAjax, name='llistarCategoriesAjax'),
url(r'^llistarProductes/$', views.llistarProductes, name='llistarProductes'),
url(r'^afegirProducte/(?P<categoria>\w+)/$', views.afegirProducte, name='afegirProducte'),
url(r'^afegirProducte/$', views.afegirProducte, name='afegirProducte'),
url(r'^afegirCategoria/$', views.afegirCategoria, name='afegirCategoria'),
url(r'^editarProducte/(?P<idProducte>\d+)/$', views.editarProducte, name='editarProducte'),
url(r'^dadesProducte/(?P<idProducte>\d+)/$', views.dadesProducte, name='dadesProducte'),
)
|
kimpa2007/restoGestio
|
tpv/productes/urls.py
|
urls.py
|
py
| 938 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.patterns",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "productes.views.llistarProductes",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "productes.views.llistarCategories",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "productes.views.llistarCategoriesAjax",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "productes.views.llistarProductes",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "productes.views.afegirProducte",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "productes.views.afegirProducte",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "productes.views.afegirCategoria",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "productes.views.editarProducte",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "productes.views.dadesProducte",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "productes.views",
"line_number": 14,
"usage_type": "name"
}
] |
37950158304
|
import copy
import matplotlib.pyplot as plt
from numpy import sqrt, inf
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from models.cnn import CNN
from linegeneration.generate_lines import create_image_set
from utils.logger import logger
from plot.lines_visualisation import create_multiplots
from utils.angle_operations import normalize_angle
from utils.misc import load_list_from_file
from models.model import loss_fn_dic
from utils.save_model import save_model
from utils.settings import settings
from utils.statistics import calculate_std_dev, accuracy
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif"
})
def main():
if settings.synthetic:
# Read Synthetic data
N = settings.patch_size_x
n = settings.n_synthetic
X, y = create_image_set(n, N, background=True, aa=True) # n images of size NxN
image_set_test, angles_test = create_image_set(n, N, background=True, aa=True)
fig, axes = create_multiplots(image_set_test, angles_test, number_sample=n, cmap='copper')
plt.show()
# X = X.reshape(n, N*N)
        # Set title for loss evolution with respect to epoch and model name
        model_name = f'best_model_experimental_Dx_convolution_{settings.loss_fn}_batch{settings.batch_size}_epoch{settings.n_epochs}_kernel{settings.kernel_size_conv}'
        #
        ax_title = f'Training on the synthetic patches (convolution) \n Learning rate: {settings.learning_rate} | Epochs: {settings.n_epochs} | Batch: {settings.batch_size} | Kernel: {settings.kernel_size_conv}'
else:
X_path = settings.x_path
y_path = settings.y_path
X, y = torch.load(X_path), [float(x) for x in load_list_from_file(y_path)]
# Set title for loss evolution with respect to epoch and model name
model_name = f'best_model_experimental_Dx_convolution_{settings.loss_fn}_batch{settings.batch_size}_epoch{settings.n_epochs}_kernel{settings.kernel_size_conv}'
ax_title = f'Training on the experimental patches (convolution + Dx) \n Learning rate: {settings.learning_rate} | Epochs: {settings.n_epochs} | Batch: {settings.batch_size} | Kernel: {settings.kernel_size_conv}'
# Load data
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, shuffle=True)
# Convert to 2D PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.float32).reshape(-1, 1)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.float32).reshape(-1, 1)
logger.info('Dataset has been set up successfully')
# Set hyperparameters
batch_size = settings.batch_size
learning_rate = settings.learning_rate
num_epochs = settings.n_epochs
# Initialize model
kernel_size_conv = settings.kernel_size_conv
network = CNN(kernel_size_conv)
name_criterion = settings.loss_fn
criterion = loss_fn_dic[name_criterion]
optimizer = optim.Adam(network.parameters(), lr=learning_rate) # optimizer
logger.info('Model has been initialized')
# Move network and data tensors to device
device = network.device
network.to(device)
X_train = X_train.to(device)
y_train = y_train.to(device)
X_test = X_test.to(device)
y_test = y_test.to(device)
# We use the pre-defined number of epochs to determine how many iterations to train the network on
batch_start = torch.arange(0, len(X_train), batch_size)
# Hold the best model
best_loss = inf # init to infinity
best_weights = None
history = []
# Initialize the progress bar
pbar = tqdm(range(num_epochs), desc="Training Progress", unit="epoch")
for epoch in range(num_epochs):
network.train() # prepare model for training
# Load in the data in batches using the train_loader object
for start in batch_start:
# Take a batch
X_batch = X_train[start:start + batch_size]
y_batch = y_train[start:start + batch_size]
# Forward pass
y_pred = network(X_batch)
loss = criterion(y_pred, y_batch)
# Backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Evaluate accuracy at the end of each epoch
network.eval()
y_pred = network(X_test)
mse = criterion(y_pred, y_test)
mse = float(mse)
history.append(mse)
# Update the progress bar description
pbar.update(1)
pbar.set_postfix({"Loss": mse})
if mse < best_loss:
best_loss = mse
best_weights = copy.deepcopy(network.state_dict())
acc = accuracy(y_test, y_pred)
# Close the progress bar
pbar.close()
# logger.info('Training is ended')
# Restore model and return best accuracy
network.load_state_dict(best_weights)
y_pred = network(X_test)
std = calculate_std_dev(y_pred, y_test)
# if anti_alias:
# model_name = f'best_model_cnn_synthetic_gaussian{sigma}_kernel{kernel_size_conv}_aa.pt'
# save_model(network, model_name, 'cnn')
# Plot accuracy
fig, ax = plt.subplots()
ax.set_title(ax_title, fontsize=12)
print("MSE: %.4f" % best_loss)
print("STD: % .4f" % std)
print(f"Accuracy: {acc}")
ax.set_xlabel('Epoch')
ax.set_ylabel(f'Loss ({name_criterion})')
ax.plot(history)
# Add a text box to the plot
textstr = '\n'.join((
r'$Loss = %.4f$' % (best_loss, ) + f' ({settings.loss_fn})',
r'$\sigma = %.4f$' % (std, ),
r'$Accuracy = %.4f$' % (acc, )
))
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.9, 0.9, textstr, transform=ax.transAxes, fontsize=14, ha='right', va='top', bbox=props)
# plt.savefig(f".\saved\plot\{model_name}.png")
plt.show()
# Plot some lines and patches
# if torch.cuda.is_available():
# y_pred_numpy = y_pred.cpu().cpu().detach().numpy()
# else:
# y_pred_numpy = y_pred.cpu().detach().numpy()
# fig1, axes1 = create_multiplots(X_test.cpu(), y_test.cpu(), y_pred_numpy, number_sample=16)
# plt.tight_layout()
# plt.savefig(f".\saved\plot\{model_name}_patches.png")
# plt.show()
if __name__ == '__main__':
main()
|
3it-inpaqt/line-classification-slope
|
models/run_cnn.py
|
run_cnn.py
|
py
| 6,433 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.synthetic",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.patch_size_x",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.n_synthetic",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "linegeneration.generate_lines.create_image_set",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "linegeneration.generate_lines.create_image_set",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "plot.lines_visualisation.create_multiplots",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.x_path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.y_path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "utils.misc.load_list_from_file",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "utils.settings.settings.loss_fn",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.batch_size",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings.n_epochs",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings.kernel_size_conv",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings.learning_rate",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.n_epochs",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings.batch_size",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings.kernel_size_conv",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "utils.logger.logger.info",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "utils.logger.logger",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.batch_size",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.learning_rate",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.n_epochs",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.kernel_size_conv",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "models.cnn.CNN",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "utils.settings.settings.loss_fn",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "models.model.loss_fn_dic",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "utils.logger.logger.info",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "utils.logger.logger",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.arange",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "utils.statistics.accuracy",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "utils.statistics.calculate_std_dev",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "utils.settings.settings.loss_fn",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "utils.settings.settings",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 163,
"usage_type": "name"
}
] |
21098037404
|
# -*- coding: utf-8 -*-
from flask import Blueprint, g, request, redirect, url_for, current_app
import os
from invenio.ext.template.context_processor import \
register_template_context_processor, template_args
from invenio.base.decorators import templated
from invenio.modules.formatter import format_record
from invenio.modules.search.models import Collection
from invenio.modules.search.forms import EasySearchForm
from invenio.modules.search.views.search import collection
blueprint = Blueprint('main', __name__, url_prefix="",
template_folder='templates',
static_url_path='', # static url path has to be empty
# if url_prefix is empty
static_folder='static')
@blueprint.route('/', methods=['GET', 'POST'])
@templated('index.html')
def index():
""" Renders homepage. """
# legacy app support
c = request.values.get('c')
if c == current_app.config['CFG_SITE_NAME']:
return redirect(url_for('.index', ln=g.ln))
elif c is not None:
return redirect(url_for('.collection', name=c, ln=g.ln))
collection = Collection.query.get_or_404(1)
from invenio.b2share.modules.b2deposit.latest_deposits import get_latest_deposits
latest_deposits = get_latest_deposits()
func = current_app.config.get("CFG_SITE_FUNCTION") or ""
@register_template_context_processor
def index_context():
return dict(
of=request.values.get('of', collection.formatoptions[0]['code']),
easy_search_form=EasySearchForm(csrf_enabled=False),
format_record=format_record,
)
return dict(collection=collection,latest_deposits=latest_deposits, pagetitle="EUDAT B2SHARE",site_function=func)
# list all domain logos in this module's static assets folder
domain_logos = [ img for img in os.listdir(os.path.join(blueprint.static_folder, 'img'))
if img.startswith('domain-') ]
@template_args(collection)
def domain_collection_helpers():
"""Add helpers to the '/collection' templates"""
def get_domain_icon(collection_name):
"""Return the url to the given domain collection logo if it exists"""
if not collection_name or not isinstance(collection_name, basestring):
return;
logo_file_prefix = 'domain-' + collection_name.lower()
matching_logo = [ logo for logo in domain_logos if logo.startswith(logo_file_prefix)]
if len(matching_logo) == 1:
return url_for('static', filename=os.path.join('img',
matching_logo[0]))
elif len(matching_logo) > 0:
raise Exception('multiple logos matching domain collection ' +
collection_name)
return { 'get_domain_icon': get_domain_icon }
|
cjhak/b2share
|
invenio/b2share/modules/main/views.py
|
views.py
|
py
| 2,867 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.values.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.request.values",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.current_app.config",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.g.ln",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.g.ln",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "invenio.modules.search.views.search.collection",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "invenio.modules.search.models.Collection.query.get_or_404",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "invenio.modules.search.models.Collection.query",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "invenio.modules.search.models.Collection",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "invenio.b2share.modules.b2deposit.latest_deposits.get_latest_deposits",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.request.values.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request.values",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "invenio.modules.search.views.search.collection.formatoptions",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "invenio.modules.search.views.search.collection",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "invenio.modules.search.forms.EasySearchForm",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "invenio.modules.formatter.format_record",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "invenio.ext.template.context_processor.register_template_context_processor",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "invenio.modules.search.views.search.collection",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "invenio.base.decorators.templated",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.url_for",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "invenio.ext.template.context_processor.template_args",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "invenio.modules.search.views.search.collection",
"line_number": 58,
"usage_type": "argument"
}
] |
39956836499
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 19 14:11:57 2022
@author: Saskia Hustinx
"""
import re
import gpt_2_simple as gpt2
import tensorflow as tf
import json
import tweepy
import random
import time
### NLP SECTION ###
def words_in_string(word_list, a_string):
return set(word_list).intersection(a_string.split())
def generate_tweets(num):
texts = gpt2.generate(sess, run_name='run2', temperature=0.9, length=50, nsamples=num, return_as_list=True)
res = []
for text in texts:
match = re.findall('<|startoftext|>(.*?[\.\?!])', text)
if len(match) == 3 and len(match[2]) > 40:
if not words_in_string(filter_list, match[2]):
res.append(match[2])
return res
### TWITTER ###
# Parse the credentials for the twitter bot
with open("cred.json", "r") as json_file:
twitter_creds = json.load(json_file)
# Set the credentials based on the credentials file
API_KEY = twitter_creds['api-key']
API_SECRET = twitter_creds['api-secret']
BEARER_TOKEN = twitter_creds['bearer-token']
ACCESS_KEY = twitter_creds['access-token']
ACCESS_SECRET = twitter_creds['access-secret']
def api():
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
return tweepy.API(auth)
def fetch_dms():
# fetching the direct messages
direct_messages = api.get_direct_messages()
print(len(direct_messages))
print(direct_messages)
return direct_messages
def respond_dm(dms):
recipients = []
for dm in dms:
if dm.message_create['sender_id'] != '1577695345865367553' and dm.message_create['sender_id'] not in recipients:
recipients.append(dm.message_create['sender_id'])
res_tweets = []
while len(res_tweets) < len(recipients):
res_tweets = generate_tweets(len(recipients) + 10)
for recipient in recipients:
api.send_direct_message(recipient_id = recipient, text = str("My message for you is: \n \n"+ random.choice(res_tweets) + " ✨"))
time.sleep(5)
# main
api = api()
dms = fetch_dms()
if len(dms) > 0:
tf.config.set_visible_devices([], 'GPU')
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='run2')
# delete the outgoing dms
dms = fetch_dms()
for dm in dms:
if dm.message_create['sender_id'] == '1577695345865367553':
api.delete_direct_message(dm.id)
dms.remove(dm)
filter_list = ['victim', 'abuse', 'sex', 'planetary']
respond_dm(dms)
# delete reponded dms
for dm in dms:
api.delete_direct_message(dm.id)
|
sHustinx/nlp-fortune-cookie-bot
|
respond-dms.py
|
respond-dms.py
|
py
| 2,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gpt_2_simple.generate",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tensorflow.config.set_visible_devices",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "gpt_2_simple.start_tf_sess",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "gpt_2_simple.load_gpt2",
"line_number": 87,
"usage_type": "call"
}
] |
33626981361
|
from flask import Flask, request
import requests
import tkinter as tk
from tkinter import simpledialog
import pdfrw
import json
from flask_cors import CORS
import io
import base64
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello_world():
ROOT = tk.Tk()
ROOT.withdraw()
# the input dialog
# USER_INP = simpledialog.askstring(title="Wallet Info",
# prompt="Please Enter your Wallet ID:")
APIKEY = '43044ac0170dc40fa60cfd249ef3307b64edbab8'
BASE = 'https://rest.cryptoapis.io/v2'
BLOCKCHAIN = 'bitcoin'
NETWORK = 'mainnet'
WALLETID = request.args.get('walletId')
print(WALLETID)
#used if we are looking for data on a particular transaction
# myTestNetWallet - 62a8e61a25a05500079dda90
# random MainnetWallet - 3R2UHDGKLQkPmAjBGbdzpET95xYV59hkyw
#TID = '4b66461bf88b61e1e4326356534c135129defb504c7acb2fd6c92697d79eb250'
#blockchain-data/bitcoin/testnet/addresses/mzYijhgmzZrmuB7wBDazRKirnChKyow4M3?
#get Bitcoin amount from wallet
with requests.Session() as session:
h = {'Content-Type': 'application/json',
'X-API-KEY': APIKEY}
r = session.get(f'https://rest.cryptoapis.io/v2/wallet-as-a-service/wallets/{WALLETID}/bitcoin/testnet',headers=h)
r.raise_for_status()
qdata = r.json()
bitCoinAmount = qdata['data']['item']['confirmedBalance']['amount']
#get Ethereum amount from wallet
with requests.Session() as session:
h1 = {'Content-Type': 'application/json',
'X-API-KEY': APIKEY}
r1 = session.get(f'https://rest.cryptoapis.io/v2/wallet-as-a-service/wallets/{WALLETID}/ethereum/ropsten',headers=h1)
r1.raise_for_status()
qdata1 = r1.json()
ethereumAmount = qdata1['data']['item']['confirmedBalance']['amount']
# #test for a wallet on the chain
# #blockchain-data/bitcoin/testnet/addresses/mzYijhgmzZrmuB7wBDazRKirnChKyow4M3?
# with requests.Session() as session:
# h = {'Content-Type': 'application/json',
# 'X-API-KEY': APIKEY}
# r = session.get(f'https://rest.cryptoapis.io/v2/blockchain-data/bitcoin/testnet/addresses/{WALLETID}', headers=h)
# r.raise_for_status()
# print(json.dumps(r.json(), indent=4, sort_keys=True))
import os
#directory = os.getcwd()
#print(os.path.abspath("AtomicTest.pdf"))
#pdf_template = "/Users/adityabora/Desktop/AtomicTest.pdf"
pdf_template = "./PortfolioAnalysisV2.pdf"
pdf_output = "output7.pdf"
#template_pdf = pdfrw.PdfReader(pdf_template) # create a pdfrw object from our template.pdf
print(os.path.exists(pdf_template))
template_pdf = pdfrw.PdfReader(pdf_template)
ANNOT_KEY = '/Annots'
ANNOT_FIELD_KEY = '/T'
ANNOT_VAL_KEY = '/V'
ANNOT_RECT_KEY = '/Rect'
SUBTYPE_KEY = '/Subtype'
WIDGET_SUBTYPE_KEY = '/Widget'
for page in template_pdf.pages:
annotations = page[ANNOT_KEY]
for annotation in annotations:
if annotation[SUBTYPE_KEY] == WIDGET_SUBTYPE_KEY:
if annotation[ANNOT_FIELD_KEY]:
key = annotation[ANNOT_FIELD_KEY][1:-1]
print(key)
from datetime import date
data_dict = {
'Risk': '3.8',
'BitcoinAmount': bitCoinAmount,
'EthAmount': ethereumAmount,
'USDCAmount': '30',
'RiskGPA': '3.7'
}
def fill_pdf(input_pdf_path, data_dict):
template_pdf = pdfrw.PdfReader(input_pdf_path)
for page in template_pdf.pages:
annotations = page[ANNOT_KEY]
for annotation in annotations:
if annotation[SUBTYPE_KEY] == WIDGET_SUBTYPE_KEY:
if annotation[ANNOT_FIELD_KEY]:
key = annotation[ANNOT_FIELD_KEY][1:-1]
if key in data_dict.keys():
if type(data_dict[key]) == bool:
if data_dict[key] == True:
annotation.update(pdfrw.PdfDict(
AS=pdfrw.PdfName('Yes')))
else:
annotation.update(
pdfrw.PdfDict(V='{}'.format(data_dict[key]))
)
annotation.update(pdfrw.PdfDict(AP=''))
# pdfrw.PdfWriter().write(output_pdf_path, template_pdf)
buf = io.BytesIO()
pdfrw.PdfWriter().write(buf, template_pdf)
buf.seek(0)
return base64.encodebytes(buf.read()).decode()
data = fill_pdf(pdf_template, data_dict)
template_pdf.Root.AcroForm.update(pdfrw.PdfDict(NeedAppearances=pdfrw.PdfObject('true'))) # NEW
return data
|
MHSiles/yoloco-be
|
other/main-2.py
|
main-2.py
|
py
| 4,824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "requests.Session",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "pdfrw.PdfReader",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfReader",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfDict",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfName",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfDict",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfDict",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfWriter",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "base64.encodebytes",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfDict",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pdfrw.PdfObject",
"line_number": 129,
"usage_type": "call"
}
] |
5592159173
|
from .http import *
from .abc import User, Channel
from .channels import DMChannel
import asyncio as aio
class Client:
"""
Base class for interacting with discord
"""
def __init__(self, token: str):
self.http = HTTPClient(token)
self.event_loop = aio.new_event_loop()
aio.set_event_loop(self.event_loop)
self.events = {
"ready":None,
"tick":None
}
def event(self, _coro):
"""
Override event
=====
event_name: can be 'ready' to initiate on login
"""
self.events[_coro.__name__] = _coro
return _coro
async def login(self) -> User:
"""
Get the bot's userdata
"""
response = await self.http.connect(Route("GET", "users/@me"))
if self.events["ready"]:
await self.events["ready"]()
user = User()
await user.from_dictionary(response)
return user
async def run(self):
await self.login()
while True:
await aio.sleep(0.05)
await self.events["tick"]()
async def send_typing(self, channel:Channel):
response = await self.http.connect(Route("POST", f"channels/{channel.id}/typing"))
return response
async def get_user(self, id:int) -> User:
"""
Get userdata from ID
"""
response = await self.http.connect(Route("GET", f"users/{id}"))
user = User()
await user.from_dictionary(response)
return user
async def get_channel(self, id: int, dm:bool=False):
"""
Get a channel
"""
url = f"channels/@me/{id}" if dm else f"channels/{id}"
response = await self.http.connect(Route("GET", url))
channel = Channel() if not dm else DMChannel()
await channel.from_dictionary(response)
channel.bot_caller = self
return channel
async def close_connection(self):
await self.http.close_session()
|
ledanne/descapede
|
diswrap/client.py
|
client.py
|
py
| 2,060 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "asyncio.new_event_loop",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "asyncio.set_event_loop",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "abc.User",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "abc.User",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "asyncio.sleep",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "abc.Channel",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "abc.User",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "abc.User",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "abc.Channel",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "channels.DMChannel",
"line_number": 87,
"usage_type": "call"
}
] |
28156207354
|
import torch
from projects.thre3ingan.singans.networks import Thre3dGenerator
from torch.backends import cudnn
cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def test_thre3d_generator() -> None:
batch_size = 1
random_input = torch.randn(batch_size, 128, 64, 64, 64).to(device)
network = Thre3dGenerator().to(device)
print(network)
output = network(random_input)
assert output.shape == (batch_size, 8, 64, 64, 64)
|
akanimax/3inGAN
|
projects/thre3ingan/singans/tests/test_networks.py
|
test_networks.py
|
py
| 491 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "torch.backends.cudnn.benchmark",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.backends.cudnn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.randn",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "projects.thre3ingan.singans.networks.Thre3dGenerator",
"line_number": 15,
"usage_type": "call"
}
] |
39400883437
|
import boto3
import pickle
from typing import Any, Tuple
import logging
from re import sub
import pandas as pd
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.model_selection import StratifiedShuffleSplit
import xgboost as xgb
# ----- Class for uploading and downloading Python objects to and from S3 ---- #
class S3Pickle:
"""
A class for uploading and downloading Python objects to and from S3.
"""
def __init__(self, s3_client=None):
"""
Constructor for S3Pickle class.
Parameters
----------
s3_client : _type_, optional
A boto3 S3 client. The default is None.
"""
if s3_client is None:
self.s3_client = boto3.client('s3')
else:
self.s3_client = s3_client
def upload_pickle(self, obj: Any, bucket_name: str, key_name: str) -> None:
"""
Upload a Python object to S3 as a pickle byte string.
Parameters
----------
obj : Any
A Python object.
bucket_name : str
S3 bucket name.
key_name : str
S3 key name.
"""
# Serialize the object to a pickle byte string
pickle_byte_string = pickle.dumps(obj)
# Upload the pickle byte string to S3
self.s3_client.put_object(Body=pickle_byte_string, Bucket=bucket_name, Key=key_name)
return None
def download_pickle(self, bucket_name: str, key_name: str) -> Any:
"""
Download a Python object from S3 as a pickle byte string.
Parameters
----------
bucket_name : str
S3 bucket name.
key_name : str
S3 key name.
"""
# Download the pickle byte string from S3
response = self.s3_client.get_object(Bucket=bucket_name, Key=key_name)
pickle_byte_string = response['Body'].read()
# Deserialize the pickle byte string to a Python object
obj = pickle.loads(pickle_byte_string)
return obj
# ----------------------------------- Data ----------------------------------- #
def load_data(data_s3_url: str, logger: logging.Logger = None) -> Tuple[pd.DataFrame, np.ndarray]:
"""
Load data from S3 bucket and return X and y.
Parameters
----------
data_s3_url : str
S3 url of data.
logger : logging.Logger
Logger object.
Returns
-------
Tuple[pd.DataFrame, np.ndarray]
Feature matrix and target array.
"""
data = pd.read_csv(
data_s3_url,
index_col=0
)
# Drop ID column and 'churn category' column (not useful for prediction)
data.drop(['Customer ID', 'Churn Category'], axis=1, inplace=True)
# Change column names to lower case and relace white spaces with underscore
data.columns = [sub('\s', '_', col.lower()) for col in data.columns]
X, y = data.drop(['churn_value'], axis=1), data.churn_value.values
if logger is not None:
logger.info('Data Loaded')
logger.info(f'The shape of training set: {(X.shape, y.shape)}')
return X, y
# ----------------------- Custom metric for evaluation ----------------------- #
def weighted_ap_score(predt: np.ndarray, data: np.ndarray) -> Tuple[str, float]:
y_true = data
y_score = predt
weighted_ap_score = average_precision_score(y_true=y_true, y_score=y_score, average='weighted', pos_label=1)
return 'avgAP', weighted_ap_score
# ------------------------ Stratified train/test split ----------------------- #
def stratified_split(X_train: pd.DataFrame, y_train: np.ndarray) -> Tuple[pd.DataFrame, np.ndarray, pd.DataFrame, np.ndarray]:
"""
Split the training set into train and validation sets, stratifying on the target variable.
Parameters
----------
X_train : pd.DataFrame
Training features.
y_train : np.ndarray
Training target.
Returns
-------
Tuple[pd.DataFrame, np.ndarray, pd.DataFrame, np.ndarray]
X_train, y_train, X_val, y_val.
"""
ssf = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
for train_index, val_index in ssf.split(X_train, y_train):
X_train, X_val = X_train.iloc[train_index], X_train.iloc[val_index]
y_train, y_val = y_train[train_index], y_train[val_index]
return X_train, y_train, X_val, y_val
|
YangWu1227/python-for-machine-learning
|
tree_based/projects/telco_churn_sagemaker/src/custom_utils.py
|
custom_utils.py
|
py
| 4,461 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pickle.dumps",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pickle.loads",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "sklearn.metrics.average_precision_score",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "sklearn.model_selection.StratifiedShuffleSplit",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 123,
"usage_type": "name"
}
] |
27259802510
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""513. Find Bottom Left Tree Value [Two Passes]
"""
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution:
def height(self, root):
if not root:
return 0
return 1 + max(self.height(root.left), self.height(root.right))
def findBottomLeftValue(self, root):
final_row = self.height(root)
queue = deque()
queue.append((root, 1))
while queue:
node, level = queue.popleft()
if level == final_row:
return node.val
if node.left:
queue.append((node.left, level+1))
if node.right:
queue.append((node.right, level+1))
|
asperaa/back_to_grind
|
Trees/bottom_left_tree.py
|
bottom_left_tree.py
|
py
| 890 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 25,
"usage_type": "call"
}
] |
2018426878
|
import unittest
import sys
import os
import tempfile
import shutil
from appliapps.flow.branch import Branch
from appliapps.flow.collate import Collate
from appliapps.flow.merge import Merge
from appliapps.flow.split import Split
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tdir = tempfile.mkdtemp(dir=".")
os.chdir(cls.tdir)
with open("input.ini", "w") as f:
f.write("""COMMENT = comm,ent
SOMEKEY = some, key
LOG_LEVEL = INFO
LOG_STORAGE = memory""")
def test1_branch(self):
sys.argv = ['--INPUT', 'input.ini', '--BRANCH', 'tandem.ini', 'omssa.ini', '--COMMENT', 'kommentar']
Branch.main()
assert os.path.exists('tandem.ini')
assert os.path.exists('omssa.ini')
def test2_collate(self):
sys.argv = ['--COLLATE', 'tandem.ini', 'omssa.ini', '--OUTPUT', 'collate.ini']
Collate.main()
assert os.path.exists('collate.ini')
def test3_split(self):
sys.argv = ['--INPUT', 'input.ini', '--SPLIT', 'split.ini', '--SPLIT_KEY', 'SOMEKEY']
Split.main()
assert os.path.exists('split.ini_0')
assert os.path.exists('split.ini_1')
def test4_merge(self):
sys.argv = ['--MERGE', 'split.ini', '--MERGED', 'merged.ini']
Merge.main()
assert os.path.exists('merged.ini_0')
@classmethod
def tearDownClass(cls):
os.chdir("..")
shutil.rmtree(cls.tdir)
|
lcb/applicake
|
tests/test_flow.py
|
test_flow.py
|
py
| 1,454 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "appliapps.flow.branch.Branch.main",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "appliapps.flow.branch.Branch",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "appliapps.flow.collate.Collate.main",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "appliapps.flow.collate.Collate",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "appliapps.flow.split.Split.main",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "appliapps.flow.split.Split",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "appliapps.flow.merge.Merge.main",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "appliapps.flow.merge.Merge",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 50,
"usage_type": "call"
}
] |
32108920209
|
import json
from typing import List
import mlflow
import pandas
from pandas import DataFrame
class SpambugInference(mlflow.pyfunc.PythonModel):
"""
Inference code copied from MLFlow bugs.py
"""
def __init__(self, extraction_pipeline, clf, le):
self.extraction_pipeline = extraction_pipeline
self.clf = clf
self.le = le
def predict(self, context, bugs: List[str]):
"""
Args:
context ([type]): MLflow context where the model artifact is stored.
model_input ([type]): the input data to fit into the model.
"""
bugs = [json.loads(s) for s in bugs]
probs = self.classify(bugs, True)
indexes = probs.argmax(axis=-1)
suggestions = self.le.inverse_transform(indexes)
return {"probs": probs, "indexes": indexes, "suggestions": suggestions}
def classify(
self,
items,
probabilities=False
):
assert items is not None
assert (
self.extraction_pipeline is not None and self.clf is not None
), "The module needs to be initialized first"
if not isinstance(items, list):
items = [items]
assert isinstance(items[0], (dict, tuple))
X = self.extraction_pipeline.transform(lambda: items)
if probabilities:
classes = self.clf.predict_proba(X)
else:
classes = self.clf.predict(X)
classes = self.overwrite_classes(items, classes, probabilities)
return classes
def overwrite_classes(self, bugs, classes, probabilities):
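# Bugs filed from an @mozilla account are treated as trusted: their prediction
# is overridden to the first class (index 0, presumably the non-spam label),
# either as a hard probability vector or as a plain label depending on `probabilities`.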
for i, bug in enumerate(bugs):
if "@mozilla" in bug["creator"]:
if probabilities:
classes[i] = [1.0, 0.0]
else:
classes[i] = 0
return classes
|
mozilla/mlops-platform-spike-library
|
bugbug/mlflow/bugbug/trackers/spambug_inference.py
|
spambug_inference.py
|
py
| 1,839 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mlflow.pyfunc",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
}
] |
75246891067
|
import collections
import copy
import numpy as np
import mindspore as ms
from mindspore import nn, ops
from mindspore import Tensor, Parameter
from mindspore.nn.layer.normalization import _BatchNorm
from mindspore.nn.probability.distribution import Normal
from mindspore_gl import Graph
from mindspore_gl import GNNCell
from mindspore_gl.nn import AvgPooling
class SparseDispatcher():
"""Helper for implementing a mixture of experts.
The purpose of this class is to create input minibatches for the
experts and to combine the results of the experts to form a unified
output tensor.
There are two functions:
dispatch - take an input Tensor and create input Tensors for each expert.
combine - take output Tensors from each expert and form a combined output
Tensor. Outputs from different experts for the same batch element are
summed together, weighted by the provided "gates".
The class is initialized with a "gates" Tensor, which specifies which
batch elements go to which experts, and the weights to use when combining
the outputs. Batch element b is sent to expert e iff gates[b, e] != 0.
The inputs and outputs are all two-dimensional [batch, depth].
Caller is responsible for collapsing additional dimensions prior to
calling this class and reshaping the output to the original shape.
See common_layers.reshape_like().
Example use:
gates: a float32 `Tensor` with shape `[batch_size, num_experts]`
inputs: a float32 `Tensor` with shape `[batch_size, input_size]`
experts: a list of length `num_experts` containing sub-networks.
dispatcher = SparseDispatcher(num_experts, gates)
expert_inputs = dispatcher.dispatch(inputs)
expert_outputs = [experts[i](expert_inputs[i]) for i in range(num_experts)]
outputs = dispatcher.combine(expert_outputs)
The preceding code sets the output for a particular example b to:
output[b] = Sum_i(gates[b, i] * experts[i](inputs[b]))
This class takes advantage of sparsity in the gate matrix by including in the
`Tensor`s for expert i only the batch elements for which `gates[b, i] > 0`.
"""
def __init__(self, num_experts, gates):
"""Create a SparseDispatcher."""
self._gates = gates
self._num_experts = num_experts
# sort experts
sorted_experts, index_sorted_experts = ops.nonzero(gates).sort(0)
# drop indices
_, self._expert_index = sorted_experts.split(1, axis=1)
# get according batch index for each expert
self._batch_index = ops.nonzero(gates)[index_sorted_experts[:, 1], 0]
self._batch_index = self._batch_index.int()
# calculate num samples that each expert gets
self._part_sizes = (gates > 0).sum(0).asnumpy().tolist()
# expand gates to match with self._batch_index
gates_exp = gates[ops.flatten(self._batch_index, start_dim=0)]
self._nonzero_gates = ops.gather_elements(gates_exp, 1, self._expert_index)
self.cat = ops.Concat(axis=0)
def dispatch(self, inp):
"""Create one input Tensor for each expert.
The `Tensor` for a expert `i` contains the slices of `inp` corresponding
to the batch elements `b` where `gates[b, i] > 0`.
Args:
inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]`
Returns:
a list of `num_experts` `Tensor`s with shapes
`[expert_batch_size_i, <extra_input_dims>]`.
"""
if isinstance(inp, list):
inps, result = [], []
for index in self._batch_index:
inps.append(inp[index])
i = 0
for index in self._part_sizes:
result.append(inps[i:i + index])
i += index
return result
# assigns samples to experts whose gate is nonzero
# expand according to batch index so we can just split by _part_sizes
inp_exp = inp[self._batch_index].squeeze(1)
return ops.split(inp_exp, self._part_sizes, axis=0)
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
"""
# apply exp to expert outputs, so we are no longer in log space
stitched = self.cat(expert_out).exp()
if multiply_by_gates:
stitched = ops.mul(stitched, self._nonzero_gates)
zeros = ops.zeros((self._gates.shape[0], expert_out[-1].shape[1]))
# combine samples that have been processed by the same k experts
combined = ops.index_add(zeros, self._batch_index, stitched.float(), 0)
# add eps to all zero values in order to avoid nans when going back to log space
combined[combined == 0] = np.finfo(float).eps
# back to log space
return combined.log()
def expert_to_gates(self):
"""Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
and shapes `[expert_batch_size_i]`
"""
# split nonzero gates for each expert
return ops.split(self._nonzero_gates, self._part_sizes, axis=0)
class EDISMOE(nn.Cell):
"""Call a Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.
Args:
input_size: integer - size of the input
output_size: integer - size of the output
num_experts: an integer - number of experts
hidden_size: an integer - hidden size of the experts
noisy_gating: a boolean
k: an integer - how many experts to use for each batch element
"""
def __init__(self, input_size, num_experts, hidden_size, noisy_gating, k,
block_num, embedding_size, filter_num, out_dim):
super(EDISMOE, self).__init__()
self.noisy_gating = noisy_gating
self.num_experts = num_experts
self.input_size = input_size
self.hidden_size = hidden_size
self.k = k
# instantiate experts
models = []
for _ in range(self.num_experts):
models.append(MGraphDTA(block_num, embedding_size, filter_num, out_dim))
self.experts = nn.CellList(models)
self.w_gate = Parameter(ops.zeros((input_size, num_experts), dtype=ms.float32), requires_grad=True)
self.w_noise = Parameter(ops.zeros((input_size, num_experts), dtype=ms.float32), requires_grad=True)
self.softplus = ops.Softplus()
self.softmax = nn.Softmax(axis=1)
self.mea = Parameter(Tensor(np.array([0.0]), ms.float32), requires_grad=False)
self.std = Parameter(Tensor(np.array([1.0]), ms.float32), requires_grad=False)
def cv_squared(self, x):
"""The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
"""
eps = 1e-10
# if only num_experts = 1
if ops.shape(x)[0] == 1:
return Tensor(np.array([0]), dtype=x.dtype)
return x.float().var(ddof=True) / (x.float().mean() ** 2 + eps)
def noisy_top_k_gating(self, x, train, noise_epsilon=1e-2):
"""Noisy top-k gating.
Args:
x: input Tensor with shape [batch_size, input_size]
train: a boolean - we only add noise at training time.
noise_epsilon: a float
Returns:
gates: a Tensor with shape [batch_size, num_experts]
load: a Tensor with shape [num_experts]
"""
clean_logits = x @ self.w_gate
if self.noisy_gating and train:
raw_noise_stddev = x @ self.w_noise
noise_stddev = ((self.softplus(raw_noise_stddev) + noise_epsilon))
noisy_logits = clean_logits + (ops.randn_like(clean_logits) * noise_stddev)
logits = noisy_logits
else:
logits = clean_logits
# calculate topk + 1 that will be needed for the noisy gates
top_logits, top_indices = logits.topk(min(self.k + 1, self.num_experts), dim=1)
top_k_logits = top_logits[:, :self.k]
top_k_indices = top_indices[:, :self.k]
top_k_gates = self.softmax(top_k_logits)
zeros = ops.zeros_like(logits)
gates = ops.scatter(zeros, 1, top_k_indices, top_k_gates)
if self.noisy_gating and self.k < self.num_experts and train:
load = (self._prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits)).sum(0)
else:
load = self._gates_to_load(gates)
return gates, load
def construct(self, data, x, loss_coef=1e-2):
"""Args:
x: tensor shape [batch_size, input_size]
train: a boolean scalar.
loss_coef: a scalar - multiplier on load-balancing losses
Returns:
y: a tensor with shape [batch_size, output_size].
extra_training_loss: a scalar. This should be added into the overall
training loss of the model. The backpropagation of this loss
encourages all experts to be approximately equally used across a batch.
"""
gates, load = self.noisy_top_k_gating(x, self.training)
# calculate importance loss
importance = gates.sum(0)
#
loss = self.cv_squared(importance) + self.cv_squared(load)
loss *= loss_coef
dispatcher = SparseDispatcher(self.num_experts, gates)
expert_outputs = [self.experts[i](*copy.deepcopy(data))[0] for i in range(self.num_experts)]
out_experts = []
for i, out in enumerate(expert_outputs):
out_experts.append(dispatcher.dispatch(out)[i].unsqueeze(1))
y = dispatcher.combine(out_experts)
return y, loss
def _gates_to_load(self, gates):
"""Compute the true load per expert, given the gates.
The load is the number of examples for which the corresponding gate is >0.
Args:
gates: a `Tensor` of shape [batch_size, n]
Returns:
a float32 `Tensor` of shape [n]
"""
return (gates > 0).sum(axis=0)
def _prob_in_top_k(self, clean_values, noisy_values, noise_stddev, noisy_top_values):
"""Helper function to NoisyTopKGating.
Computes the probability that value is in top k, given different random noise.
This gives us a way of backpropagating from a loss that balances the number
of times each expert is in the top k experts per example.
In the case of no noise, pass in None for noise_stddev, and the result will
not be differentiable.
Args:
clean_values: a `Tensor` of shape [batch, n].
noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
normally distributed noise with standard deviation noise_stddev.
noise_stddev: a `Tensor` of shape [batch, n], or None
noisy_top_values: a `Tensor` of shape [batch, m].
"values" Output of tf.top_k(noisy_top_values, m). m >= k+1
Returns:
a `Tensor` of shape [batch, n].
"""
batch = ms.ops.shape(clean_values)[0]
m = ms.ops.shape(noisy_top_values)[1]
top_values_flat = ops.flatten(noisy_top_values, start_dim=0)
threshold_positions_if_in = ops.arange(batch) * m + self.k
threshold_if_in = ops.unsqueeze(ops.gather_elements(top_values_flat, 0, threshold_positions_if_in), 1)
is_in = ms.Tensor.gt(noisy_values, threshold_if_in)
threshold_positions_if_out = threshold_positions_if_in - 1
threshold_if_out = ops.unsqueeze(ops.gather_elements(top_values_flat, 0, threshold_positions_if_out), 1)
# is each value currently in the top k.
normal = Normal(self.mea, self.std)
prob_if_in = normal.cdf((clean_values - threshold_if_in) / noise_stddev)
prob_if_out = normal.cdf((clean_values - threshold_if_out) / noise_stddev)
prob = ms.Tensor.where(is_in, prob_if_in, prob_if_out)
return prob
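# Illustrative construction sketch (values are placeholders, not taken from the original code):
# moe = EDISMOE(input_size=96, num_experts=4, hidden_size=64, noisy_gating=True,
#               k=2, block_num=3, embedding_size=33, filter_num=32, out_dim=1)
# y, aux_loss = moe(data, gating_features)  # aux_loss is the load-balancing term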
class Conv1dReLU(nn.Cell):
'''
kernel_size=3, stride=1, padding=1
kernel_size=5, stride=1, padding=2
kernel_size=7, stride=1, padding=3
'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
super().__init__()
self.inc = nn.SequentialCell(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
pad_mode="pad", padding=padding, has_bias=True),
nn.ReLU()
)
def construct(self, x):
return self.inc(x)
class StackCNN(nn.Cell):
"""cnn"""
def __init__(self, layer_num, in_channels, out_channels, kernel_size, stride=1, padding=0):
super().__init__()
d = collections.OrderedDict()
d["conv_layer0"] = Conv1dReLU(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=padding)
for layer_idx in range(layer_num - 1):
d[f"conv_layer{layer_idx + 1}"] = Conv1dReLU(out_channels, out_channels, kernel_size=kernel_size,
stride=stride, padding=padding)
d['pool_layer'] = nn.AdaptiveMaxPool1d(1)
self.inc = nn.SequentialCell(d)
def construct(self, x):
y = self.inc(x)
y = ops.squeeze(y, axis=-1)
return y
class TargetRepresentation(nn.Cell):
"""target representation"""
def __init__(self, block_num, embedding_num):
super().__init__()
self.block_list = nn.CellList()
for block_idx in range(block_num):
self.block_list.append(StackCNN(block_idx + 1, embedding_num, 96, 3))
self.cat = ops.Concat(axis=-1)
self.linear = nn.Dense(block_num * 96, 96)
def construct(self, x):
feats = [block(x) for block in self.block_list]
x = self.cat(feats)
x = self.linear(x)
return x
class GraphConv(GNNCell):
"""GCN"""
def __init__(self,
in_feat_size: int,
out_size: int,
bias: bool = True):
super().__init__()
assert isinstance(in_feat_size, int) and in_feat_size > 0, "in_feat_size must be positive int"
assert isinstance(out_size, int) and out_size > 0, "out_size must be positive int"
self.in_feat_size = in_feat_size
self.out_size = out_size
in_feat_size = (in_feat_size, in_feat_size)
self.lin_rel = nn.Dense(in_feat_size[0], out_size, has_bias=bias)
self.lin_root = nn.Dense(in_feat_size[1], out_size, has_bias=False)
def construct(self, x, g: Graph):
"""
Construct function for GraphConv.
"""
x = ops.Squeeze()(x)
x_r = x
g.set_vertex_attr({"x": x})
for v in g.dst_vertex:
v.x = g.sum([u.x for u in v.innbs])
x = [v.x for v in g.dst_vertex]
x = self.lin_rel(x)
x = self.lin_root(x_r) + x
return x
class NodeLevelBatchNorm(_BatchNorm):
r"""
Applies Batch Normalization over a batch of graph data.
Shape:
- Input: [batch_nodes_dim, node_feature_dim]
- Output: [batch_nodes_dim, node_feature_dim]
batch_nodes_dim: all nodes of a batch graph
"""
def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True,
use_batch_statistics=None):
super(NodeLevelBatchNorm, self).__init__(
num_features, eps, momentum, affine, use_batch_statistics=use_batch_statistics)
def construct(self, input_data):
exponential_average_factor = self.momentum
return ops.batch_norm(
input_data, self.moving_mean, self.moving_variance, self.gamma, self.beta,
self.training,
exponential_average_factor, self.eps)
class GraphConvBn(nn.Cell):
"""GCB"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = GraphConv(in_channels, out_channels)
self.norm = NodeLevelBatchNorm(out_channels)
self.relu = nn.ReLU()
def construct(self, data):
x, edge_index, atom_n, edge_n = data['x'], data['edge_index'], data['atom_n'], data['edge_n']
y = self.conv(x, edge_index[0], edge_index[1], atom_n, edge_n)
mask_tmp = ops.ExpandDims()(data['x_mask'], -1)
y = y * mask_tmp
y = self.norm(y)
data['x'] = self.relu(y)
data['x'] = data['x'] * mask_tmp
return data
class DenseLayer(nn.Cell):
"""Dense Layer"""
def __init__(self, num_input_features, growth_rate=32, bn_size=4):
super().__init__()
self.conv1 = GraphConvBn(num_input_features, int(growth_rate * bn_size))
self.conv2 = GraphConvBn(int(growth_rate * bn_size), growth_rate)
self.cat = ops.Concat(axis=1)
def bn_function(self, data):
concated_features = self.cat(data['x'])
data['x'] = concated_features
data = self.conv1(data)
return data
def construct(self, data):
if isinstance(data['x'], Tensor):
data['x'] = [data['x']]
data = self.bn_function(data)
data = self.conv2(data)
return data
class DenseBlock(nn.Cell):
"""Dense Block"""
def __init__(self, num_layers, num_input_features, growth_rate=32, bn_size=4):
super().__init__()
self.dense_layer = nn.CellList()
for i in range(num_layers):
layer = DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size)
self.dense_layer.append(layer)
self.cat = ops.Concat(axis=1)
def construct(self, data):
features = [data['x']]
for layer in self.dense_layer:
data = layer(data)
features.append(data['x'])
data['x'] = features
data['x'] = self.cat(data['x'])
return data
class GraphDenseNet(nn.Cell):
"""Graph Dense Network"""
def __init__(self, out_dim, num_input_features, block_config, bn_sizes, growth_rate=32):
super().__init__()
d = collections.OrderedDict()
d['conv0'] = GraphConvBn(num_input_features, 32)
num_input_features = 32
for i, num_layers in enumerate(block_config):
block = DenseBlock(
num_layers, num_input_features, growth_rate=growth_rate, bn_size=bn_sizes[i])
d[f'block{i + 1}'] = block
num_input_features += int(num_layers * growth_rate)
trans = GraphConvBn(num_input_features, num_input_features // 2)
d[f'transition{i + 1}'] = trans
num_input_features = num_input_features // 2
self.features = nn.SequentialCell(d)
self.atom_num = 144
self.edge_num = 320
self.mean_pooling = AvgPooling()
self.classifier = nn.Dense(num_input_features, out_dim)
def construct(self, data):
"""Graph Dense Network"""
batch_size = ops.shape(data['target'])[0]
data['atom_n'] = self.atom_num * batch_size
data['edge_n'] = self.edge_num * batch_size
graph_mask = np.ones(batch_size).tolist()
graph_mask.append(0)
graph_mask = np.array(graph_mask)
graph_mask = ms.Tensor(graph_mask, ms.int32)
data = self.features(data)
x = self.mean_pooling(data['x'], data['edge_index'][0], data['edge_index'][1], data['atom_n'], data['edge_n'],
data['batch'], data['batch'], graph_mask)
x = x[:batch_size]
x = self.classifier(x)
return x
class MGraphDTA(nn.Cell):
"""MGraphDTA"""
def __init__(self, block_num, embedding_size=128, filter_num=32, out_dim=1):
super().__init__()
self.protein_encoder = TargetRepresentation(block_num, embedding_size)
self.ligand_encoder = GraphDenseNet(num_input_features=22, out_dim=filter_num * 3,
block_config=[8, 8, 8], bn_sizes=[2, 2, 2])
self.cat = ops.Concat(axis=1)
self.classifier = nn.SequentialCell(
nn.Dense(filter_num * 3 * 2, 1024),
nn.ReLU(),
nn.Dropout(0.9),
nn.Dense(1024, 1024),
nn.ReLU(),
nn.Dropout(0.9),
nn.Dense(1024, 256),
nn.ReLU(),
nn.Dropout(0.9),
nn.Dense(256, out_dim)
)
def construct(self, x, edge_attr, edge_index, target, batch, x_mask=None):
"""MGraphDTA"""
target = target.float()
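# The protein input arrives flattened; reshape it to (batch, seq_len=1200, feat=33)
# and move the feature axis in front of the sequence axis so the Conv1d-based
# protein encoder receives a channels-first layout.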
target = ops.reshape(target, (-1, 1200, 33))
target = ops.permute(target, (0, 2, 1))
protein_x = self.protein_encoder(target)
data = {'batch': batch, 'x': x, 'edge_attr': edge_attr, 'edge_index': edge_index, 'target': target,
'x_mask': x_mask}
ligand_x = self.ligand_encoder(data)
feature = self.cat((protein_x, ligand_x))
out = self.classifier(feature)
return out, feature
|
shuoliu0-0/EDIS
|
model.py
|
model.py
|
py
| 21,685 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "mindspore.ops.nonzero",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.nonzero",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.flatten",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.gather_elements",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.Concat",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.split",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.mul",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.zeros",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.index_add",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "numpy.finfo",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "mindspore.ops.split",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.CellList",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "mindspore.Parameter",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "mindspore.ops.zeros",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "mindspore.float32",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "mindspore.Parameter",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "mindspore.ops.zeros",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "mindspore.float32",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "mindspore.ops.Softplus",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Softmax",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "mindspore.Parameter",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "mindspore.float32",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "mindspore.Parameter",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "mindspore.float32",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "mindspore.ops.shape",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "mindspore.Tensor",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "mindspore.ops.randn_like",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.zeros_like",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.scatter",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "mindspore.ops.shape",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "mindspore.ops.shape",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "mindspore.ops.flatten",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.arange",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 271,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.unsqueeze",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.gather_elements",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor.gt",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "mindspore.ops.unsqueeze",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.gather_elements",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "mindspore.nn.probability.distribution.Normal",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor.where",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.SequentialCell",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Conv1d",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.ReLU",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 303,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "mindspore.nn.AdaptiveMaxPool1d",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.SequentialCell",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 314,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.squeeze",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 323,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.CellList",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.Concat",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "mindspore_gl.GNNCell",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 355,
"usage_type": "name"
},
{
"api_name": "mindspore_gl.Graph",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.Squeeze",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 361,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.layer.normalization._BatchNorm",
"line_number": 372,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.batch_norm",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 388,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.ReLU",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.ExpandDims",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 414,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.Concat",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "mindspore.Tensor",
"line_number": 430,
"usage_type": "argument"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 438,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.CellList",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 442,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.Concat",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 446,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 464,
"usage_type": "call"
},
{
"api_name": "mindspore.nn.SequentialCell",
"line_number": 476,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 476,
"usage_type": "name"
},
{
"api_name": "mindspore_gl.nn.AvgPooling",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 480,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.shape",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "mindspore.Tensor",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "mindspore.int32",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn.Cell",
"line_number": 502,
"usage_type": "attribute"
},
{
"api_name": "mindspore.nn",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.Concat",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 510,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.SequentialCell",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 511,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 512,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 512,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.ReLU",
"line_number": 513,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 513,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dropout",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 515,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 515,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.ReLU",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 516,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dropout",
"line_number": 517,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 517,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 518,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 518,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.ReLU",
"line_number": 519,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 519,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dropout",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 520,
"usage_type": "name"
},
{
"api_name": "mindspore.nn.Dense",
"line_number": 521,
"usage_type": "call"
},
{
"api_name": "mindspore.nn",
"line_number": 521,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.reshape",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 527,
"usage_type": "name"
},
{
"api_name": "mindspore.ops.permute",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "mindspore.ops",
"line_number": 528,
"usage_type": "name"
}
] |
6153769301
|
from django.shortcuts import render
from . models import Department, Employee, User, Phone, Book, Store
from django.http import HttpResponse
# Create your views here.
def index(request):
# without relationship:
user = User.objects.get(pk=1)
phone = Phone.objects.get(user_id=user)
# with relationship:
# related to the model Phone
# user = User.objects.get(pk=1).phone
# using related_name = "number"
# returning object
user_phone = User.objects.get(pk=1).number
# returning string
user_phone_str = User.objects.get(pk=1).number.phone_no
print("--------------------------------------")
print(user_phone)
# reverse: getting the user from the phone
user = Phone.objects.get(id=1).user_id
return HttpResponse(user)
def foreign_key(request):
# using REVERSE !!!IMPORTANT!!!
user = Employee.objects.get(name="Simpson").department_name
user = Employee.objects.get(name="Simpson").department_name.name
# print(user)
# print(type(user))
first_deparment = Department.objects.get(pk=1) #hr
second_deparment = Department.objects.get(pk=2) #accounts
third_deparment = Department.objects.get(pk=5) #purchase
deparments = Employee.objects.filter(department_name=third_deparment)
# related_name !!! IMPORTANT !!! -- the default accessor employee_set.all() doesn't work here because a related_name is set
employees_from_dep = first_deparment.employeees_rel.all()
employees_from_dep = first_deparment.employeees_rel.all().filter()
#print(employees_from_dep)
# related_name !!! IMPORTANT !!!
# after the related_name, a double underscore lets you filter on fields of the related model
dep_all_empl = Department.objects.all().filter(employeees_rel__name__startswith="John")
# if there is NO related_name, the next (commented) line would work using the lowercased model name
#dep_all_empl = Department.objects.all().filter(employee__name__startswith="John")
# reverse !!!!!!! VERY IMPORTANT !!!!!!
employees_in_HR = Employee.objects.filter(department_name__name='Accounts')
print(employees_in_HR)
return HttpResponse(employees_in_HR)
# select_related
# https://docs.djangoproject.com/en/3.0/ref/models/querysets/#select-related
def sel_related(request):
# fetching all employees and printing their names #INNER join
employees = Employee.objects.all().select_related('department_name')
#employees = Employee.objects.all()
for i in employees:
print(i.name, i.department_name.name)
return render(request, 'core/stuff.html')
# for the debugger
def users(request):
qs = User.objects.all()
return render(request, 'core/users.html',{
"users": qs,
})
def prefetched(request):
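# The first queryset below is immediately overwritten; only the prefetch_related()
# version is used, so all related stores are loaded in one extra query instead of
# one query per book inside the loop.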
books = Book.objects.all()
books = Book.objects.all().prefetch_related('store_set')
# for each book, look up all the stores that carry it
for i in books:
print(i.store_set.all())
return render(request, 'core/users.html')
|
oruchkin/biteofpithon
|
django relationships/relation/core/views.py
|
views.py
|
py
| 3,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.User.objects.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.Phone.objects.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Phone.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.Phone",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.Phone.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Phone.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "models.Phone",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "models.Employee",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "models.Employee.objects.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "models.Employee",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "models.Department.objects.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.Department.objects",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "models.Department",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "models.Department.objects.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models.Department.objects",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "models.Department",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "models.Department.objects.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.Department.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "models.Department",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.Employee.objects.filter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "models.Employee",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.Department.objects.all",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.Department.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "models.Department",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "models.Employee.objects.filter",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "models.Employee",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects.all",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "models.Employee.objects",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "models.Employee",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.User.objects.all",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.Book.objects.all",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "models.Book.objects",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "models.Book",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "models.Book.objects.all",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "models.Book.objects",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "models.Book",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 98,
"usage_type": "call"
}
] |
28114724866
|
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
# Data
sizes = ["0.5x0.5", "2x2", "5x5", "7x7"]
postgres = [2, 10, 94, 153]
accumulo = [8, 15, 22, 41]
# Plot
plt.plot(sizes, postgres, label="PostgreSQL")
plt.plot(sizes, accumulo, label="Accumulo")
plt.xlabel("Extent(km²)")
plt.ylabel("Read Time(s)")
plt.legend()
# Show the figure
plt.show()
|
KiktMa/gdal_tiff
|
shange/paintsd.py
|
paintsd.py
|
py
| 386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
}
] |
31127201950
|
# to run, execute this command in the command line:
# python create_plots.py pagecounts-20160802-150000.txt pagecounts-20160803-150000.txt
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
filename1 = sys.argv[1]
filename2 = sys.argv[2]
data1 = pd.read_table(filename1, sep=' ', header=None, index_col=1,
names=['lang', 'page', 'views', 'bytes'])
data1_sort = data1.sort_values(by=['views'], ascending=False)
# print(data1_sort)
data2 = pd.read_table(filename2, sep=' ', header=None, index_col=1,
names=['lang', 'page', 'views', 'bytes'])
data2_sort = data2.sort_values(by=['views'], ascending=False)
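# Both tables are indexed by page name (index_col=1), so this assignment aligns
# each page's day-2 view count with its day-1 row.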
data1_sort['views2'] = data2_sort['views']
# print (data1_sort)
# print (data2_sort)
plt.figure(figsize=(10, 5)) # change the size to something sensible
plt.subplot(1, 2, 1) # subplots in 1 row, 2 columns, select the first
plt.plot(data1_sort['views'].values)
plt.title('Popularity Distribution')
plt.xlabel('Rank')
plt.ylabel('Views')
plt.subplot(1, 2, 2) # ... and then select the second
plt.scatter(data1_sort['views'].values, data1_sort['views2'].values)
plt.title('Daily Correlation')
plt.xlabel('Day 1 views')
plt.ylabel('Day 2 views')
plt.xscale('log')
plt.yscale('log')
# plt.show()
plt.savefig('wikipedia_Tom.png')
|
tomliangg/Plotting_Wikipedia_Page_Views
|
create_plots.py
|
create_plots.py
|
py
| 1,324 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_table",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_table",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yscale",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
}
] |
70111562109
|
import requests
def run():
api_key = 'f258ca5a16d84339a5f6cdb4c7700756'
query_map = {}
url = 'http://api.weatherbit.io/v2.0/current'
query_map['lat'] = 39.757
query_map['lon'] = -75.742
query_map['key'] = api_key
query_map['lang'] = 'en'
response = requests.get(url,params=query_map).json()
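# The Weatherbit response is in Celsius (metric default); convert to Fahrenheit
# and return it together with the weather description.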
return [(float(response['data'][0]['temp']) * 9/5) + 32,response['data'][0]['weather']['description']]
run()
|
cthacker-udel/Raspberry-Pi-Scripts
|
py/getcurrweather.py
|
getcurrweather.py
|
py
| 441 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
3981685761
|
from django.test import TestCase
from django import forms
from parameterized import parameterized
from MDM.forms import ItemGroupForm
from MDM.bootstrap import INPUT
class ItemGroupFormTest(TestCase):
@parameterized.expand([
('description', 'Descrição do Grupo de Item'),
])
def test_form_labels(self, field, label):
form = ItemGroupForm()
self.assertEqual(form.fields[field].label, label)
@parameterized.expand([
('description', forms.TextInput),
])
def test_form_widgets(self, field, widget):
form = ItemGroupForm()
self.assertIsInstance(form.fields[field].widget, widget)
def test_form_valid_data(self):
form = ItemGroupForm(data={
'description': 'Grupo de Itens',
})
self.assertTrue(form.is_valid())
def test_form_invalid_data(self):
form = ItemGroupForm(data={
'description': '',
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
@parameterized.expand([
('description', INPUT['class']),
])
def test_form_widgets_attrs(self, field, attrs):
form = ItemGroupForm()
self.assertEqual(form.fields[field].widget.attrs['class'], attrs)
|
tiagomend/flow_erp
|
MDM/tests/test_item_group_form.py
|
test_item_group_form.py
|
py
| 1,264 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "MDM.forms.ItemGroupForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "MDM.forms.ItemGroupForm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.forms.TextInput",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "MDM.forms.ItemGroupForm",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "MDM.forms.ItemGroupForm",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "MDM.forms.ItemGroupForm",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized.expand",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "parameterized.parameterized",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "MDM.bootstrap.INPUT",
"line_number": 39,
"usage_type": "name"
}
] |
25390435322
|
import logging
import requests
import pandas as pd
import re
import time
from .sqlite import Orders
from .sqlite import Balances
class BitMex():
def pull_bitmex_orderbooks(symbol, limit, mode='live'):
# Tracking execution time
start_ts = time.time() * 1000
# Get request
request = requests.get('https://www.bitmex.com/api/v1/orderBook/L2?symbol={}&depth={}'.format(symbol, limit))
bitmex = request.json()
# Check to make sure data is pulled properly
if request.status_code == 200:
# Track latency
req_ts = int(time.time())
sell_arr = []
buy_arr = []
for item in bitmex:
# ['Price','Amount','Value']
row = [item['price'], item['size'], item['price'] * item['size']]
if item['side'] == "Sell":
sell_arr.append(row)
if item['side'] == "Buy":
buy_arr.append(row)
# Extract Bids and Asks to DFs
df_buy = pd.DataFrame(buy_arr)
df_sell = pd.DataFrame(sell_arr)
#Ensure that DFs are not empty
if len(df_buy) == 0:
df_buy = pd.DataFrame([[0,0,0]])
if len(df_sell) == 0:
df_sell = pd.DataFrame([[0,0,0]])
df_buy.columns = df_sell.columns = ['Price','Amount','Value']
# # Write order book data to databae
for row in buy_arr:
Orders.create(
ts=req_ts,
price=row[0],
amount=row[1],
value=row[2],
b_a='b'
)
for row in sell_arr:
Orders.create(
ts=req_ts,
price=row[0],
amount=row[1],
value=row[2],
b_a='a'
)
final_ts = time.time() * 1000
# Log request
req_log = [start_ts, req_ts, final_ts, request.status_code, symbol, 'orders']
logging.info(req_log)
return (df_buy, df_sell)
else:
logging.warning("Orderbook request failure.")
logging.warning(request.json())
return(None, None)
## check_pending_orders will check if there are pending orders.
## It makes a decision based on whether there are pending orders and whether we have
## an appropriate balance and will return a decision of 'BUY' or 'SELL'
## default: 'SELL'
def check_pending_orders(symbol, client, c_maj, c_min, current_balance, thresh, trade_alloc):
# Get pending orders
logging.info("Checking pending orders...")
# TODO: get this dynamically
symbol = "BTC/USD"
bitmex = client.fetch_open_orders(symbol)
## this is some data munging that we have to do because bitmex doesn't
## return a nice object
sell_arr = []
buy_arr = []
for item in bitmex:
# ['orderID','Price','Amount','Value']
row = [item['info']['orderID'], item['info']['price'], item['info']['orderQty'], item['info']['price'] * item['info']['orderQty']]
if item['info']['side'] == "Sell":
sell_arr.append(row)
if item['info']['side'] == "Buy":
buy_arr.append(row)
pending_orders = {'BUY': buy_arr, 'SELL': sell_arr}
if pending_orders != []:
if(len(pending_orders['BUY']) + len(pending_orders['SELL']) == 0):
for c in (c_maj, c_min):
coin = Balances.select().where(Balances.coin == c).order_by(Balances.id.desc()).get()
current_balance[c] = coin.balance
logging.info("Checking balances....")
# do a balance check to see whether we can trade with current balance
# based on threshold
decision = BitMex.balance_check(current_balance[c_maj], current_balance[c_min], thresh, trade_alloc)
if decision:
return('BUY', pending_orders)
else:
return('SELL', pending_orders)
else:
if(len(pending_orders['BUY']) > 0):
return('BUY', pending_orders)
else:
return('SELL', pending_orders)
# TODO: what should we do if no pending orders?
# return('SELL', pending_orders)
## DONE
def balance_check(balance_maj, balance_min, thresh, trade_alloc):
# major = the one you're quoting.
# minor = the one you're quoting in.
# balance_maj is major coin balance
# balance_min is minor coin balance
# thresh is threshold under which you buy the major pair
# trade_alloc is the allocated amount to trade
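# e.g. balance_check(balance_maj=0.5, balance_min=1200, thresh=1.0, trade_alloc=1000) -> True,
# because the major balance is at or below the threshold and the minor balance covers the allocation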
return((balance_maj <= thresh) and (balance_min >= trade_alloc))
## eliminate_excess_orders will cancel all but the best order
def eliminate_excess_orders(df, decision):
# checks for all excess orders and returns list of non-optimal oID to cancel
logging.info("Eliminating excess orders...")
o_df = pd.DataFrame(df)
o_df.columns = ['ts','bs','p','a','deal','oid']
print(o_df)
if(decision == 'BUY'):
o_optimal = o_df.p.max()
else:
o_optimal = o_df.p.min()
oid_keep = o_df[o_df.p == o_optimal].oid
orders_to_cancel = [i for i in o_df[o_df.oid != oid_keep.iloc[0]].oid]
return orders_to_cancel
def update_order(pending_orders, o_optimal, decision, trade_alloc, client, symbol):
pair = symbol.replace('-','')
# cancel all orders
resp = self.cancel_all_orders(client, pending_orders, decision)
logging.info("Canceling All Orders for {}: {} Side: {}".format(pair, resp, decision))
log_request(conn, time.time(), resp, pair, 'cancel_order - {}'.format(decision))
# issue order
resp = issue_order(decision, symbol, o_optimal, trade_alloc/o_optimal, conn)
logging.info("Issuing Orders for {}: {} Side: {}".format(pair, resp, decision))
return('Order Updated')
def cancel_all_orders(self, client, orders, decision):
# order[0] = orderID
for order in orders[decision]:
logging.info("Cancelling order: {}".format(order[0]))
try:
client.cancelOrder(order[0])
except OrderNotFound as e:
logging.info("Cancelling Excess Orders {} [Fail]:".format(order[0], e))
## TODO: update with better logging
def issue_order(decision, symbol, price, amount, client, precision=0):
try:
# initialize temporary client to avoid UNAUTH
# TODO: don't hard code this
ccxt_sym = "BTC/USD"
print("issue order")
if(decision == 'BUY'):
rresp = client.create_limit_buy_order(ccxt_sym, amount, price)
oid = rresp['id']
log_trade(conn, symbol, price, amount, oid, decision)
return(oid)
if(decision == 'SELL'):
# To catch bad precision loopback re-order
if (precision > 0):
print('Debug precision: ', amount, str(amount))
rresp = client.create_limit_sell_order(ccxt_sym, amount, price)
else:
rresp = client.create_limit_sell_order(ccxt_sym, amount, price)
oid = rresp['id']
log_trade(conn, symbol, price, amount, oid, decision)
return(oid)
except Exception as issue_error:
print(type(issue_error))
print(issue_error.args)
print(str(issue_error.args[0]).replace(',','|'))
# In scenario with improper amount precision
if ('precision of amount' in str(issue_error.args)):
logging.warning(str('Improper Amount Precision - {}'.format(str(issue_error.args[0]))))
m = re.search('(The precision of amount).*[0-9]{1}', str(issue_error.args[0]))
precision = int(m.group(0)[-1])
print(precision)
order_amount = truncate(amount, precision)
if (order_amount > 0.0):
print('Reissuing order', order_amount, precision)
issue_order(decision, symbol, price, order_amount, conn, precision)
return('Reissued Order')
else:
return('Error issueing order: order_amount too low for precision')
return(str(issue_error).replace(',','|'))
def is_best_order(decision, symbol, o_optimal, client, pending_orders, order_df):
pair = symbol.replace('-','')
if (decision == 'BUY'):
if (o_optimal > pending_orders['BUY'][0][2]):
return(False)
else:
return(True)
elif (decision == 'SELL'):
if (o_optimal < pending_orders['SELL'][0][2]):
return(False)
else:
return(True)
|
noqcks/bmex-algo
|
src/bitmex.py
|
bitmex.py
|
py
| 8,025 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlite.Orders.create",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sqlite.Orders",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "sqlite.Orders.create",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "sqlite.Orders",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "sqlite.Balances.select",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlite.Balances",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "sqlite.Balances.coin",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "sqlite.Balances.id.desc",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sqlite.Balances.id",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 211,
"usage_type": "call"
}
] |
776580237
|
from pprint import pprint
import requests
import json
# Task 1: Read the GitHub API documentation, figure out how to list the repositories of a specific user,
# and save the JSON output to a *.json file.
url = 'https://api.github.com/users/Karamba278/repos' # The username (mine) is put straight into the URL, but for
# convenience it could also be entered separately
response = requests.get(url)
j_data = response.json()
#pprint(j_data)
for repos in j_data:
print(repos['name'])
# since we may need other data as well, write all the repository information to the file
with open('GitHubUserRep.json', "w") as write_f:
json.dump(j_data, write_f)
# Task 2: Study the list of open APIs. Find any one that requires authorization (of any type).
# Make requests to it after authorizing. Write the server response to a file.
# Since NASA was promoted at the webinar, let's take the NASA API :)
url = 'https://api.nasa.gov/planetary/apod'
api_key = 'aydx4dSBSpKHD8tGxFrVqOYxxe2df2lirt0rKGxj'
params = {'api_key':api_key}
response2 = requests.get(url, params=params)
j_data2 = response2.json()
pprint(j_data2)
with open('NASA_data.json', "w") as write_f2:
json.dump(j_data2, write_f2)
|
Karamba278/parsing
|
DZ1/lesson1.py
|
lesson1.py
|
py
| 1,643 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 34,
"usage_type": "call"
}
] |
42433734628
|
import pytest
import random
from torchrl.envs import make_gym_env, TransitionMonitor
@pytest.mark.parametrize('spec_id', [
'Acrobot-v1',
'CartPole-v1',
'MountainCar-v0',
'MountainCarContinuous-v0',
'Pendulum-v0',
])
def test_transition_monitor(spec_id: str):
env = TransitionMonitor(make_gym_env(spec_id))
for _ in range(3):
env.reset()
info = env.info
assert not env.is_done
assert len(env.transitions) == 0
assert info.get('len') == 0
assert info.get('return') == 0.0
flushed_transitions = []
while not env.is_done:
env.step(env.action_space.sample())
if random.random() < 0.2: # Flush with probability 0.2
flushed_transitions += env.flush()
flushed_transitions += env.flush()
info = env.info
assert info.get('return') is not None
assert info.get('len') > 0
assert info.get('len') == len(flushed_transitions)
assert len(env.transitions) == 0
env.close()
|
activatedgeek/torchrl
|
torchrl/envs/test_wrappers.py
|
test_wrappers.py
|
py
| 968 |
python
|
en
|
code
| 110 |
github-code
|
6
|
[
{
"api_name": "torchrl.envs.TransitionMonitor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchrl.envs.make_gym_env",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 7,
"usage_type": "attribute"
}
] |
8516167540
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
path_Lf3D = "/mn/stornext/d19/RoCS/alma/emissa_sim/linfor3D/outhdf/"
f = {"500nm" : h5py.File(path_Lf3D + "d3t57g44c_v000G_n019_it000_05000_mu1_00_linfor_3D_2.hdf", "r"),
"1mm" : h5py.File(path_Lf3D + "d3t57g44c_v000G_n019_it000_01mm_mu1_00_linfor_3D_2.hdf", "r"),
"3mm" : h5py.File(path_Lf3D + "d3t57g44c_v000G_n019_it000_03mm_mu1_00_linfor_3D_2.hdf", "r")}
F = h5py.File("/mn/stornext/d19/RoCS/svenwe/jonast/data/art/input/tst/d3t57g44_v000G_n019_art_it000_mode1.h5", "r")
fig,ax = plt.subplots(figsize=(8,6))
for i,key in enumerate(f.keys()):
z_CF = np.array(f[key]["contfctz"])*1e-8
k = np.argmax(z_CF)
z_CF = z_CF[:k+1]
CF = np.mean(np.array(f[key]["contfunc"][:k+1,:,:]), axis=(1,2))
ax.plot(z_CF, CF/np.max(CF))
ax.fill_between(z_CF, np.zeros(len(CF)), CF/np.max(CF), alpha=0.5, label=r"$\lambda = $ {:} {:}".format(key[:-2], key[-2:]))
ax.set_axisbelow(True)
ax.grid()
#ax.axvline(x=
ax.legend()
ax.set_xlabel("z [Mm]")
ax.set_ylabel("norm. cont. func.")
figname="mean_CF.pdf"
plt.savefig("figures/"+figname, bbox_inches="tight")
|
jonasrth/MSc-plots
|
mean_CF.py
|
mean_CF.py
|
py
| 1,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "h5py.File",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
43970042116
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
Pedro Cerqueira
github: @pedrorvc
DESCRIPTION
    This script serves to create xml files containing the information necessary
    for the execution of BRIG (Blast Ring Image Generator), reducing the time
    spent on the tedious task of setting up all the information on the GUI
    and providing a quick way to produce an image.
    The arguments for this script provide some (but not all)
    of the available options in BRIG, which are the ones I tended to change the most.
USAGE:
brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file
-oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt
--contig-order contig_order.tsv
"""
import argparse
import csv
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from xml.dom import minidom
from Bio import SeqIO
from matplotlib import cm
def listdir_fullpath(path):
""" Gets the full path of the files from a directory
Args:
path (str): full path to a directory
Returns:
list containing the full path of every file contained in the input directory
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def ring_attributes(colour, name, position):
""" Creates ring attributes.
Args:
colour (str): color of the ring.
name (str): name of the ring.
position (str): position of the ring.
Returns:
ring_attrs (dict): attributes of any regular ring of the BRIG xml.
"""
ring_attrs = {"colour" : colour,
"name": name,
"position" : position,
"upperInt" : "90",
"lowerInt" : "70",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return ring_attrs
def annotation_ring_attributes(position):
""" Creates annotation ring attributes.
Args:
position (str): position of the ring.
Returns:
annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml.
"""
annotation_ring_attrs = {"colour" : '172,14,225',
"name": 'null',
"position" : position,
"upperInt" : "70",
"lowerInt" : "50",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return annotation_ring_attrs
def create_feature_attrs(label, colour, decoration, start, stop):
""" Create attributes for the Feature SubElements of the annotation ring.
Args:
label (str): name of the gene/CDS to annotate
colour (str): colour of the decoration for the annotation
decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow'
start (str): start of the gene/CDS to annotate
stop (str): stop of the gene/CDS to annotate
Results:
feature_element_attrs (dict): attributes of the feature element.
feature_range_element_attrs (dict): attributes of the feature range element
"""
feature_element_attrs = {'label' : label,
'colour' : colour,
'decoration' : decoration}
feature_range_element_attrs = {'start' : start,
'stop' : stop}
return feature_element_attrs, feature_range_element_attrs
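# A quick illustrative sketch (the gene name and coordinates are hypothetical): calling
# create_feature_attrs('traJ', 'black', 'clockwise-arrow', '100', '550') returns
#   {'label': 'traJ', 'colour': 'black', 'decoration': 'clockwise-arrow'}
#   {'start': '100', 'stop': '550'}
# which are attached to the <feature> and <featureRange> elements of the annotation ring.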
def create_annotation_ring_tsv(annotation_ring, annotation_file):
""" Uses a tsv file to annotate the reference genome.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
"""
with open(annotation_file) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Obtain the annotations from the file contents
for row in reader:
start = row['#START']
stop = row['STOP']
label = row['Label']
colour = row['Colour']
decoration = row['Decoration']
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
#if type(genome_size) == int:
# Obtain the features of the Genbank file records
for fea in record.features:
# Get the start and end position of the genome
# Also get the strand
if fea.type == 'CDS':
start = str(fea.location.start.position)
end = str(fea.location.end.position)
strand = fea.location.strand
# Get the label of the gene or product
if 'gene' in fea.qualifiers:
label = str(fea.qualifiers['gene'][0])
elif 'product' in fea.qualifiers:
product = fea.qualifiers['product'][0]
label = str(product)
else:
continue
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if fea.type == 'source':
size = fea.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file
and specific gene annotations.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
for f in record.features:
if f.type == 'CDS':
# Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated
if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes:
label = f.qualifiers['gene'][0]
elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes:
product = f.qualifiers['product'][0]
label = product
else:
continue
# Determine the start, stop and strand of the gene
start = str(f.location.start.position + genome_size)
end = str(f.location.end.position + genome_size)
strand = f.location.strand
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if f.type == "source":
size = f.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records):
""" Create annotation ring using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
"""
if genes_of_interest != []:
        # Get the genes to search for in the Genbank file
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
# Create feature elements of the annotation ring
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order):
""" Create annotation ring using a Genbank annotation file divided by contigs.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
contig_order (str): Full path to the file containing the order of the contigs.
"""
if contig_order != []:
with open(contig_order) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Create an OrderedDict with the contents of the file
            # The keys are numbers representing the order of the contigs
# The values are the names of the contigs
content_dict = OrderedDict()
for r in reader:
content_dict[r["order"]] = r["contig"]
# Create an OrderedDict with the content of each contig
# The keys are the names of the contigs
        # The values are SeqRecord objects from BioPython
seq_records_dict = OrderedDict()
for record in records:
seq_records_dict[record.id] = record
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size)
genome_size = gsize
else:
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size)
genome_size = gsize
else:
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def write_xml(root_elem, output_file):
""" Writes a xml file.
Args:
root_elem is a ElementTree Element object containing all the information
required for the output file.
output_file (str): full path to the output file
"""
xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8')
pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ')
output_file = output_file + ".xml"
with open(output_file, "w") as f:
f.write(pretty_xml_file)
####### Create xml elements
# Create root element
def create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format):
"""
Creates the root element of the xml file and its attributes.
Args:
blast_options (str): additional options for blast, for example, -evalue or num_threads
legend_position (str): position of the legend on the image
query_file (str): full path to the query file
output_folder (str): full path to the output folder
image_output_file (str): full path to the image output file
title (str): title of the output image
image_format (str): format of the image output file
Returns:
root: ElementTree Element object containing the BRIG tag and its attributes
"""
root_attrs = {"blastOptions" : blast_options,
"legendPosition" : legend_position,
"queryFile" : query_file,
"outputFolder" : output_folder,
"blastPlus" : "yes",
"outputFile" : os.path.join(output_folder, image_output_file),
"title" : title,
"imageFormat" : image_format,
"queryFastaFile" : query_file,
"cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")}
root = ET.Element('BRIG', attrib=root_attrs)
return root
#### Create root children
# Create cgview_settings element
def create_cgview_settings_element(root, height, width):
""" Creates the cgview_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
height (str): height of the output image in pixels
width (str): width of the output image in pixels
Returns:
cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes
"""
cgview_settings_attrs = {"arrowheadLength" : "medium",
"backboneColor" : "black",
"backboneRadius" : "600",
"backboneThickness" : "medium",
"backgroundColor" : "white",
"borderColor" : "black",
"featureSlotSpacing" : "medium",
"featureThickness" : "30",
"giveFeaturePositions" : "false",
"globalLabel" : "true",
"height" : height,
"isLinear" : "false",
"labelFont" : "SansSerif,plain,25",
"labelLineLength" : "medium",
"labelLineThickness" : "medium",
"labelPlacementQuality" : "best",
"labelsToKeep" : "1000",
"longTickColor" : "black",
"minimumFeatureLength" : "medium",
"moveInnerLabelsToOuter" :"true",
"origin" : "12",
"rulerFont" : "SansSerif,plain,35",
"rulerFontColor" : "black",
"rulerPadding" : "40",
"rulerUnits" : "bases",
"shortTickColor" : "black",
"shortTickThickness" : "medium",
"showBorder" : "false",
"showShading" : "true",
"showWarning" : "false",
"tickDensity" : "0.2333",
"tickThickness" : "medium",
"titleFont" : "SansSerif,plain,45",
"titleFontColor" : "black",
"useColoredLabelBackgrounds" : "false",
"useInnerLabels" : "true",
"warningFont" : "Default,plain,35",
"warningFontColor" : "black",
"width" : width,
"zeroTickColor" : "black",
"tickLength" : "medium"}
cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs)
return cgview_settings
# Create brig_settings element
def create_brig_settings_element(root, java_memory):
""" Creates the brig_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG
Returns:
brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes
"""
brig_settings_attrs = {"Ring1" : "172,14,225",
"Ring2" : "222,149,220",
"Ring3" : "161,221,231",
"Ring4" : "49,34,221",
"Ring5" : "116,152,226",
"Ring6" : "224,206,38",
"Ring7" : "40,191,140",
"Ring8" : "158,223,139",
"Ring9" : "226,38,122",
"Ring10" :"211,41,77",
"defaultUpper" : "70",
"defaultLower" : "50",
"defaultMinimum" : "50",
"genbankFiles" : "gbk,gb,genbank",
"fastaFiles" : "fna,faa,fas,fasta,fa",
"emblFiles" : "embl",
"blastLocation" : "",
"divider" : "3",
"multiplier" : "3",
"memory" : java_memory,
"defaultSpacer" : "0"}
brig_settings = ET.SubElement(root,
"brig_settings",
attrib=brig_settings_attrs)
return brig_settings
## Create special element
def create_special_element(root):
"""Creates the 'special' element of the xml file and its attributes
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
Returns:
gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes
gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes
"""
gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'})
gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'})
return gc_content_special, gc_skew_special
# Create reference dir element
def create_reference_directory_element(root, reference_directory):
""" Creates the 'reference directory' element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
Returns:
ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes
"""
ref_dir = ET.SubElement(root,
"refDir",
attrib={"location" : reference_directory})
# Obtain the full path for all the files in the directory
ref_dir_list = listdir_fullpath(reference_directory)
for f in ref_dir_list:
ref_file = ET.SubElement(ref_dir,
"refFile",
attrib={"location" : f})
return ref_file
# Create the ring where the annotations are defined
def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order):
""" Creates the ring that will contain the annotations for the reference genome.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing a list of specific genes.
contig_order (str): Full path to the tab-delimited file containing the order of the contigs.
"""
# Determine the position of the annotation ring, which will be the position after the last reference genome
ring_position = len(os.listdir(reference_directory)) + 2
# Create the annotation ring element
annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position)))
# Check for tab-delimited annotation file input
if list(SeqIO.parse(annotation_file, "genbank")) == []:
create_annotation_ring_tsv(annotation_ring, annotation_file)
else:
# Get the records of the Genbank file
records = [r for r in SeqIO.parse(annotation_file, "genbank")]
### Check if a contig order file has been provided
if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs
create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order)
else:
create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records)
## Create remaining rings
def create_ring_element(root, reference_directory, colormap):
""" Creates the ring elements of the xml file, containing the position and color of the rings.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings
Returns:
ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes
ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
ref_dir_list = listdir_fullpath(reference_directory)
# Gets the colormap from matplotlib with as many colors as the number of files
cmap = cm.get_cmap(colormap, len(ref_dir_list))
list_colormap = cmap.colors.tolist()
# Remove the fourth element (transparency) because it is not necessary
colors_to_use = []
for l in list_colormap:
convert = [round(x * 255) for x in l]
convert.pop()
colors_to_use.append(convert)
#reversed_colors_to_use = colors_to_use[::-1]
# Check if the user provided an order for the rings
has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list]
if True in has_digit:
# Obtain the ring positions
ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list]
# Reverse sort the positions of the rings, because they will be created
# in a descending order of their positions
ring_positions.sort(reverse=True)
ref_dir_list.sort(reverse=True)
for ring in range(len(ref_dir_list)):
# The ring positions start at 2 due to the special rings (GC Content and GC Skew)
ring_position = int(ring_positions[ring]) + 1
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_position)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
else:
# Sort files by lowercase
ref_dir_list.sort(key=lambda y: y.lower())
# The number of rings starts at 2 due to the GC Content and GC Skew
ring_number = len(ref_dir_list) + 1
for ring in range(len(ref_dir_list)):
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_number)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
ring_number -= 1
return ring_number_element, ring_sequence_element
## Create special rings
def create_special_ring_element(root):
""" Create the 'special' ring element and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
Returns:
gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
# Create ring attributes
gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0")
gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1")
# Add ring element to root
gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs)
gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs)
# Add sequence element to ring
gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'})
gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'})
return gc_content_location, gc_skew_location
def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file,
genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap):
root = create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format)
cgview_settings = create_cgview_settings_element(root, height, width)
brig_settings = create_brig_settings_element(root, java_memory)
special = create_special_element(root)
refdir = create_reference_directory_element(root, reference_directory)
if annotation_file:
create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order)
rings = create_ring_element(root, reference_directory, colormap)
special_ring = create_special_ring_element(root)
write_xml(root, output_xml)
print("\n File written to {}".format(output_xml))
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-q', '--query', type=str, required=True, dest='query_file',
help='Path to the query/reference FASTA file.')
parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory',
help='Path to the directory where the FASTA files to compare against the reference are located.')
parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder',
help='Path to the output directory for the results of BRIG.')
parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file',
help='Path to the output of this script.')
parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file',
help='Path to the output file of the resulting image of BRIG.')
parser.add_argument('-t', '--title', type=str, required=True, dest='title',
help='Title of the resulting image from BRIG.')
parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False,
help='File containing annotations for the reference genome. '
                            'The annotation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb)')
parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[],
help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ')
parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[],
help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. '
'Example: order contig '
'1 Contig8')
parser.add_argument('-b', '--blast_options', type=str, required=False, dest="blast_options", default="-evalue 0.001 -num_threads 6",
help='Options for running BLAST.')
parser.add_argument('-l', '--legend_pos', type=str, required=False, dest="legend_position", default="middle-right",
                        help='Position of the legend on the resulting image.'
'The options available are upper, center or lower, '
'paired with left, center or right')
parser.add_argument('-if', '--image_format', type=str, required=False, dest="image_format", default="jpg",
help='Format of the resulting image file.'
'The available options are: jpg, png, svg or svgz.')
parser.add_argument('-ht', '--height', type=str, required=False, dest="height", default="3000",
help='Height (in pixels) of the resulting image.')
parser.add_argument('-wd', '--width', type=str, required=False, dest="width", default="3000",
help='Width (in pixels) of the resulting image.')
parser.add_argument('-jm', '--java_memory', type=str, required=False, dest="java_memory", default="1500",
help='Amount of memory (in bytes) that Java is allowed to use for BRIG.')
parser.add_argument('-cm', '--colormap', type=str, required=False, dest="colormap", default="viridis",
help='Colormap from matplotlib to use for the color of the rings. '
'The available options are: viridis, plasma, inferno, magma and cividis.'
'More options for colormaps at: '
'https://matplotlib.org/users/colormaps.html')
args = parser.parse_args()
return [args.query_file, args.reference_directory, args.output_folder, args.output_file,
args.image_output_file, args.title, args.annotation_file, args.genes_of_interest, args.contig_order,
args.blast_options, args.legend_position, args.image_format, args.height, args.width, args.java_memory, args.colormap]
if __name__ == '__main__':
args = parse_arguments()
main(args[0], args[1], args[2], args[3], args[4], args[5], args[6],
args[7], args[8], args[9], args[10], args[11], args[12], args[13],
args[14], args[15])
|
TAMU-CPT/galaxy-tools
|
tools/genome_viz/brigaid.py
|
brigaid.py
|
py
| 36,126 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "xml.dom.minidom.parseString",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 437,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 500,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 500,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 543,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 543,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 565,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 566,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 566,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 587,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 587,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 595,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 622,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 622,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 625,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 625,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 631,
"usage_type": "name"
},
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 663,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 677,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 681,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 681,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 697,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 697,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 700,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 700,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 704,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 704,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 721,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 721,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 724,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 724,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 728,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 728,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 754,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 754,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 755,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 755,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 758,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.SubElement",
"line_number": 759,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 759,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 793,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 794,
"usage_type": "attribute"
}
] |
71783932668
|
from setuptools import setup, find_packages
def readme():
with open('README.rst', encoding='utf-8') as f:
content = f.read()
return content
def get_version():
with open('VERSION', encoding='utf-8') as f:
version = f.read()
return version
setup(
name='thonny-quecpython',
version=get_version(),
description='quecpython programing kits for thonny',
long_description=readme(),
long_description_content_type='text/x-rst',
python_requires='>=3.7',
license="MIT License",
author='dustin.wei',
author_email='[email protected]',
keywords=["QuecPython", "quecpython", "QuecPython Kits", "quecpython kits"],
url='https://github.com/QuecPython/thonny-quecpython',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
platforms=["windows"],
packages=find_packages(),
package_data={
"thonnycontrib.quecpython.fw": [
"fw_config.json",
"exes/aboot/*",
"exes/blf_tools/*",
"exes/NB/*",
"exes/rda/*",
],
},
install_requires=[
'thonny>=4.1.1',
'Pypubsub>=4.0.3'
],
)
|
wcache/thonny_quecpython
|
setup.py
|
setup.py
|
py
| 1,267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "setuptools.setup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 34,
"usage_type": "call"
}
] |
34398796086
|
from typing import List
import docx
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
class DocxIngestor(IngestorInterface):
allowed_extensions = ['docx']
@classmethod
def parse(cls, path: str) -> List[QuoteModel]:
if not cls.can_ingest(path):
raise Exception('Cannot Ingest Exception')
quotes = []
doc = docx.Document(path)
for para in doc.paragraphs:
if para.text != "":
parsed = para.text.split('-')
parsed[0] = parsed[0].strip(' ').strip('"')
parsed[1] = parsed[1].strip(' ')
new_quote = QuoteModel(parsed[0], parsed[1])
quotes.append(new_quote)
return quotes
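# Illustrative usage sketch (the .docx path below is hypothetical, not part of this repo):
#   quotes = DocxIngestor.parse('./_data/DogQuotes/DogQuotesDOCX.docx')
# Each non-empty paragraph is expected to look like: "To bork or not to bork" - Bork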
|
KosziDrimi/Meme-Generator-project
|
QuoteEngine/DocxIngestor.py
|
DocxIngestor.py
|
py
| 800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "IngestorInterface.IngestorInterface",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "docx.Document",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "QuoteModel.QuoteModel",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "QuoteModel.QuoteModel",
"line_number": 12,
"usage_type": "name"
}
] |
38137944576
|
import dash
from dash import html, dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import pandas as pd
# Load data
df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv")
# Create subplots
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)
# Add candlestick chart
fig.add_trace(go.Candlestick(x=df['Date'],
open=df['AAPL.Open'],
high=df['AAPL.High'],
low=df['AAPL.Low'],
close=df['AAPL.Close'],
name='Candlestick'),
row=1, col=1)
# Add volume chart
fig.add_trace(go.Bar(x=df['Date'],
y=df['AAPL.Volume'],
name='Volume'),
row=2, col=1)
# Update layout
fig.update_layout(height=600, title_text="Candlestick and Volume Chart")
# Create Dash app
app = dash.Dash(__name__)
# Define dropdown options
dropdown_options = [{'label': 'Hour', 'value': '1H'},
{'label': 'Day', 'value': '1D'},
{'label': 'Week', 'value': '1W'},
{'label': 'Month', 'value': '1M'}]
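# Descriptive note: the callback below maps these values to the last 252/126/63/21 daily
# rows of the Apple dataset (roughly a year, half-year, quarter and month of trading days)
# rather than resampling to a literal hourly/daily/weekly/monthly granularity.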
# Define app layout
app.layout = html.Div(children=[
html.Label('Select timeframe:'),
dcc.Dropdown(id='timeframe-dropdown', options=dropdown_options, value='1H', clearable=False),
dcc.Graph(id='graph', figure=fig),
html.Br(),
])
# Define callback to update chart based on dropdown selection
@app.callback(Output('graph', 'figure'),
[Input('timeframe-dropdown', 'value')])
def update_chart(timeframe):
# Filter data based on selected timeframe
if timeframe == '1H':
df_filtered = df[-252:]
elif timeframe == '1D':
df_filtered = df[-126:]
elif timeframe == '1W':
df_filtered = df[-63:]
elif timeframe == '1M':
df_filtered = df[-21:]
# Create new chart based on filtered data
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)
fig.add_trace(go.Candlestick(x=df_filtered['Date'],
open=df_filtered['AAPL.Open'],
high=df_filtered['AAPL.High'],
low=df_filtered['AAPL.Low'],
close=df_filtered['AAPL.Close'],
name='Candlestick'),
row=1, col=1)
fig.add_trace(go.Bar(x=df_filtered['Date'],
y=df_filtered['AAPL.Volume'],
name='Volume'),
row=2, col=1)
fig.update(layout_xaxis_rangeslider_visible=False)
return fig
# Run app
if __name__ == '__main__':
app.run_server(debug=True, port=5000)
|
TIIIIIIW/SOFTWARE-DEVELOPMENT-2
|
ML/Data/TestDash.py
|
TestDash.py
|
py
| 2,975 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "plotly.subplots.make_subplots",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Candlestick",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Bar",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "dash.Dash",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dash.html.Div",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "dash.html.Label",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "dash.dcc.Dropdown",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "dash.dcc",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "dash.dcc.Graph",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "dash.dcc",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "dash.html.Br",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "plotly.subplots.make_subplots",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs.Candlestick",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objs.Bar",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objs",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "dash.dependencies.Output",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "dash.dependencies.Input",
"line_number": 51,
"usage_type": "call"
}
] |
42242066819
|
from random import Random
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
class Partition:
def __init__(self, data, index):
self.data = data
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
data_idx = self.index[idx]
return self.data[data_idx]
class DataPartitioner:
def __init__(self, data, sizes=[1], seed=1340):
self.data = data
self.partitions = []
rng = Random()
rng.seed(seed)
data_len = len(data)
indexes = list(range(data_len))
rng.shuffle(indexes)
for part in sizes:
part_len = int(part * data_len)
self.partitions.append(indexes[0: part_len])
indexes = indexes[part_len:]
def use(self, rank):
return Partition(self.data, self.partitions[rank])
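# Illustrative sketch (the example sizes are assumptions, not from this script): with
# sizes=[0.5, 0.3, 0.2] over a 10-sample dataset, the shuffled indices are split 5/3/2
# and each rank only ever sees its own shard, e.g.
#   partitioner = DataPartitioner(list(range(10)), sizes=[0.5, 0.3, 0.2])
#   shard0 = partitioner.use(0)   # len(shard0) == 5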
def get_mnist(data_dir, rank, size):
trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(0.1307, 0.3081),
])
batch_size = 128
num_workers = 8
download = True
dataset_train = datasets.MNIST(root=data_dir, train=False,
transform=trans,
download=download)
batch_size_part = int(batch_size / size)
partition_sizes = [1.0 / size for _ in range(size)]
paritition = DataPartitioner(dataset_train, partition_sizes)
paritition = paritition.use(rank)
train_data = DataLoader(dataset=paritition,
batch_size=batch_size_part,
num_workers=num_workers,
shuffle=True)
print('data shape', next(iter(train_data))[0].shape)
return train_data
if __name__ == '__main__':
data = get_mnist('~/data/', 0, 3)
|
DragonChen-TW/torch_DDP
|
data_partition.py
|
data_partition.py
|
py
| 1,832 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "random.Random",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.MNIST",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 56,
"usage_type": "call"
}
] |
12138793296
|
from dataclasses import fields
from pyexpat import model
import django
from django import forms
from django.db.models import fields
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.forms import ModelForm
from .models import *
class CustomUserCreationForm(UserCreationForm):
class Meta:
model=Userreg
fields=('username','email','phoneno','password1','password2')
def clean(self):
val = super(CustomUserCreationForm, self).clean()
email = val.get("email")
if email == "":
self.add_error('email','this field is required')
return val
class EditUserForm(forms.ModelForm):
class Meta:
model=Userreg
fields=('username','email','phoneno')
class Productform(ModelForm):
class Meta:
model = Product
fields = ('product_name','description','category','mrp_price','stocks','product_image1','product_image2','product_image3','product_image4','descriptionone','descriptiontwo','descriptionthree','descriptionfour')
labels = {
'product_name':'Product name',
'description' : 'Description',
'descriptionone' : 'Specification 1',
'descriptiontwo' : 'Specification 2',
'descriptionthree' : 'Specification 3',
'descriptionfour' : 'Specification 4',
'category' : 'Category',
'mrp_price' : 'Price',
'stocks' : 'InStock',
'product_image1' : 'Cover Image 1',
'product_image2' : 'Cover Image 2',
'product_image3' : 'Cover Image 3',
'product_image4' : 'Cover Image 4',
}
class CategoryForm(forms.ModelForm):
class Meta:
model=Category
fields = '__all__'
class EditCategoryForm(forms.ModelForm):
class Meta:
model=Category
fields = '__all__'
class AddressForm(ModelForm):
class Meta:
model = Address
fields = ('type','first_name','last_name','mobile','email','address_lane_1','address_lane_2','city','district','state','country','pincode')
labels = {
'type' : 'Address Type',
'first_name':'First name',
'last_name' : 'Last name',
'mobile' : 'Mobile',
'address_lane_1' : 'Address Lane 1',
            'address_lane_2' : 'Address Lane 2',
'city' : 'City',
'state' : 'State',
'country' : 'Country',
'pincode' : 'Pincode',
}
class DateInput(forms.DateTimeInput):
input_type = 'date'
# class CouponForm(ModelForm):
# class Meta:
# model=Coupon
# fields = ('coupon_title','coupon_code','coupon_limit','coupn_offer')
# labels = {
# 'coupon_title' : 'Coupon Title',
# 'coupon_code':'Coupon Code',
# 'coupon_limit' : 'Coupon Limit',
# 'coupn_offer' : 'Coupon Offer Price',
# }
class CouponApplyForm(forms.ModelForm):
class Meta:
model = CouponCode
fields = ['code','valid_from','valid_to','discount','active']
widgets = {
'valid_from': DateInput(),
'valid_to':DateInput(),
}
def __init__(self,*args,**kwargs):
super(CouponApplyForm, self).__init__(*args, **kwargs)
class EditCouponForm(forms.ModelForm):
class Meta:
model=CouponCode
fields=('code','valid_from','valid_to','discount','active')
class ProductOfferForm(forms.ModelForm):
class Meta:
model = ProductOffer
fields = ['code','product_id', 'valid_from','valid_to','discount','is_active']
widgets = {
'valid_from': DateInput(),
'valid_to':DateInput(),
}
def __init__(self,*args,**kwargs):
super(ProductOfferForm, self).__init__(*args, **kwargs)
class CategoryOfferForm(forms.ModelForm):
class Meta:
model = CategoryOffer
fields = ['code','category_id', 'valid_from','valid_to','discount','is_active']
widgets = {
'valid_from': DateInput(),
'valid_to': DateInput(),
}
def __init__(self,*args,**kwargs):
super(CategoryOfferForm, self).__init__(*args, **kwargs)
class EditCouponCatForm(forms.ModelForm):
class Meta:
model=CategoryOffer
fields=('code','category_id', 'valid_from','valid_to','discount','is_active')
class EditProductOffer(forms.ModelForm):
class Meta:
model=ProductOffer
fields=('code','product_id', 'valid_from','valid_to','discount','is_active')
|
baadhira/shopify-ecommerce
|
adminapp/forms.py
|
forms.py
|
py
| 4,635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.forms.DateTimeInput",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "pyexpat.model",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "django.db.models.fields",
"line_number": 141,
"usage_type": "name"
}
] |
27615702847
|
"""
Get 10 titles of the most popular movies/series etc. by each genre.
Получите 10 наименований самых популярных фильмов/сериалов и т. д. в каждом жанре.
title.basics.tsv.gz title.ratings.tsv.gz
"""
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.types as t
import pyspark.sql.functions as f
from pyspark.sql import Window
def task8():
spark_session = (SparkSession.builder
.master("local")
.appName("task app")
.config(conf=SparkConf())
.getOrCreate())
schema_title_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("titleType", t.StringType(), nullable=True),
t.StructField("primaryTitle", t.StringType(), nullable=True),
t.StructField("originalTitle", t.StringType(), nullable=True),
t.StructField("isAdult", t.StringType(), nullable=True),
t.StructField("startYear", t.IntegerType(), nullable=True),
t.StructField("endYear", t.IntegerType(), nullable=True),
t.StructField("runtimeMinutes", t.IntegerType(), nullable=True),
t.StructField("genres", t.StringType(), nullable=True),
])
schema_ratings_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("averageRating", t.DoubleType(), nullable=True),
t.StructField("numVotes", t.IntegerType(), nullable=True)
])
file_read_basics = r'.\Data\input\title.basics.tsv.gz'
file_read_ratings = r'.\Data\input\title.ratings.tsv.gz'
from_csv_basics_df = spark_session.read.csv(
file_read_basics, header=True, nullValue='null', sep=r'\t', schema=schema_title_basics)
from_csv_ratings_df = spark_session.read.csv(
file_read_ratings, header=True, nullValue='null', sep=r'\t', schema=schema_ratings_basics)
temp_df1 = from_csv_basics_df.withColumn("genres", f.explode(f.split(f.col("genres"), ",")))
temp_df1 = temp_df1.select("tconst", "titleType", "primaryTitle", "genres")
temp_df2 = from_csv_ratings_df.select("tconst", "averageRating")
temp_df3 = temp_df1.join(temp_df2, "tconst")
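    # Rank titles within each genre by average rating (descending) and keep only the top 10 per genre.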
window = (Window.orderBy(f.desc("genres"), f.desc("averageRating")).partitionBy("genres"))
from_csv_df_task8 = temp_df3.withColumn("Rating_genre", f.row_number().over(window)).where(f.col("Rating_genre") <= 10)
#from_csv_df_task8.show(100)
file_write = r'.\Data\output\task08'
from_csv_df_task8.write.csv(file_write, header=True, mode="overwrite")
return 0
|
Tetyana83/spark
|
task8.py
|
task8.py
|
py
| 2,591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyspark.sql.SparkSession.builder.master",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.DoubleType",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.explode",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.split",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.Window.orderBy",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.Window",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.desc",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.row_number",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 44,
"usage_type": "call"
}
] |
36551632681
|
import matplotlib.pyplot as plt
from DataHandler import DataHandler
from LinearClassifier import LinearClassifier
if __name__ == '__main__':
    # generate normally distributed data (1000 samples per class)
data_handler = DataHandler()
class0Dataset = data_handler.get2DGaussian(1000, [-2, -2])
class1Dataset = data_handler.get2DGaussian(1000, [2, 2])
    # label the data
class0Dataset = data_handler.label(class0Dataset, 0)
class1Dataset = data_handler.label(class1Dataset, 1)
#shuffling the data
dataset = data_handler.shuffle(class0Dataset + class1Dataset)
###############################################################
classifier = LinearClassifier()
print("initial weights : ", classifier.weights)
print("initial bais : ", classifier.bais)
actual = [row[-1] for row in dataset]
    predicted = [classifier.predict(row) for row in dataset]
    print("Accuracy before training : %.2f%%\n" %
          data_handler.accuracy(actual, predicted))
classifier.plot()
plt.show()
learning_rate = 0.01
n_folds = 5
n_epoch = 2
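    # Evaluate with 5-fold cross-validation; evaluate_model is assumed to train the
    # classifier on each fold and return the per-fold accuracy scores.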
scores = data_handler.evaluate_model(dataset, classifier, n_folds, learning_rate, n_epoch)
print('Scores: %s' % scores)
print('Average Accuracy: %.2f%%' % (sum(scores) / float(len(scores))))
print("final weights : ", classifier.weights)
print("final bais : ", classifier.bais)
# plot results
x, y, label = zip(*class0Dataset)
X, Y, label = zip(*class1Dataset)
plt.plot(x, y, 'x')
plt.plot(X, Y, 'x')
classifier.plot()
plt.show()
|
Mustapha-Belkacim/Linear-classifier
|
main.py
|
main.py
|
py
| 1,623 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "DataHandler.DataHandler",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "LinearClassifier.LinearClassifier",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
}
] |
12091024325
|
from typing import List, Iterator
import torch
from torch.utils.data.sampler import Sampler
from nltk import Tree
from nltk.tokenize.treebank import TreebankWordTokenizer
class TokenizedLengthSampler(Sampler[List[int]]):
"""
    PyTorch DataLoader-compatible sampler that batches sentences of similar tokenized length together, so that padding overhead within a batch is minimal.
"""
def __init__(self, data_source: List[str], batch_size: int, seed: int):
self.data_source = data_source
self.length = len(data_source)
self.batch_size = batch_size
tokenize = TreebankWordTokenizer().tokenize
seq_lengths = [len(tokenize(sent)) for sent in data_source]
indices = list(range(len(data_source)))
indices = sorted(indices, key=lambda i: seq_lengths[i])
batches = []
if self.length % self.batch_size != 0 :
batches.append(indices[:self.length % self.batch_size])
for start in range(self.length % self.batch_size, self.length, batch_size):
end = start + batch_size
batches.append(indices[start:end])
self.length_batches = len(batches)
self.batches = [batches[i] for i in torch.randperm(n=self.length_batches, dtype=torch.long).tolist()]
self.seq_lengths = seq_lengths
def __len__(self):
return self.length_batches
def __iter__(self) -> Iterator[List[int]]:
for batch in self.batches:
yield batch
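# Example usage (illustrative sketch; `sentences` is a list of raw strings, and
# `dataset`/`collate_fn` are assumed to be defined elsewhere):
#     sampler = TokenizedLengthSampler(sentences, batch_size=32, seed=42)
#     loader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler,
#                                          collate_fn=collate_fn)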
|
jinulee-v/bert_diora
|
bert_diora/utils.py
|
utils.py
|
py
| 1,470 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.sampler.Sampler",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "nltk.tokenize.treebank.TreebankWordTokenizer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.randperm",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterator",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 38,
"usage_type": "name"
}
] |
75137098747
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Permission
from .models import Organization, OrganizationUser
class OrganizationBackend(ModelBackend):
supports_object_permissions = True
def authenticate(self, organization=None, username=None, password=None):
if organization is None:
return None
try:
organization = Organization.objects.get(code__iexact=organization)
except Organization.DoesNotExist:
return None
try:
user = OrganizationUser.objects.get(organization=organization,
username__iexact=username,
user__is_active=True)
if user.check_password(password):
return user
except OrganizationUser.DoesNotExist:
return None
def _create_permission_set(self, perms=None):
"""
Expects a queryset of permissions, returns a formatted
set.
"""
if perms is None:
return set()
if isinstance(perms, (list, tuple)):
perms = [(perm.content_type.app_label, perm.codename)
for perm in perms]
else:
perms = perms.values_list('content_type__app_label',
'codename').order_by()
return set(['%s.%s' % (ct, name) for ct, name in perms])
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of all permission strings that this user has through
his/her roles for the given object.
We accomplish this by pulling the set of all available permissions, then
checking the object. A superuser immediately gets all of the available
permissions, and a super role gets all of their super role permissions.
The supplied object can be None, an `Organization` object,
or an object with an organization attribute.
If the object is None, then this function returns all permissions that
this user has available, regardless of object. This facilitates
situations where you want to limit functionality based off of whether or
not a permission exists at all.
If the object is an `Organization` object, we only return permissions
granted via SuperRoles and Roles the user is a member of, that are part
of the supplied organization.
If the supplied object has an `organization` attribute (or an
_ORGANIZATION_ATTRIBUTE attribute with the name of an actual attribute
that returns an `Organization` object), then the returned permissions
are all permissions granted via SuperRoles, as well as permissions
granted from Roles that the user is a member of, that are part of the
organization that owns the object.
Finally, if an object is supplied, but it is not an `Organization`
object, nor does it have an attribute that points to an `Organization`
object, then return all available permissions (as if the supplied object
was None)
"""
# superusers get all permissions, like usual
if user_obj.is_superuser:
perms = Permission.objects.all()
return self._create_permission_set(perms)
# if the user is not an OrganizationUser, they get no permissions
if not isinstance(user_obj, OrganizationUser):
return set()
# if the user is not in any roles, they get no permissions
if not any([user_obj.super_roles.count(), user_obj.roles.count()]):
return set()
# at this point, they should have some permissions
# start off with the set of super role permissions
perms = Permission.objects.filter(superrole__organizationuser=user_obj)
# next, get the set of permissions provided by the regular roles
if isinstance(obj, Organization):
# if the supplied object is an `Organization` object
object_org = obj
else:
# check the object's organization
attname = getattr(obj, '_ORGANIZATION_ATTRIBUTE', 'organization')
# if no object was passed in, or the object doesn't have an
# organization attribute, include all permissions from all roles
if obj is None or not hasattr(obj, attname):
roles = user_obj.roles.all()
perms = perms | Permission.objects.filter(role__in=roles)
# done calculating at this point, return early
return self._create_permission_set(perms)
# At this point, we know the object is not None and the object
# has an organization attribute, so fetch the value of the
# organization
object_org = getattr(obj, attname, None)
# If the value of the organization attribute is None, then return
# the currently collected permissions
if object_org is None:
return self._create_permission_set(perms)
# Finally, collect the permissions this user has on this object, based
# off of the set of organizations they are a member of
# If the user is not a member of the organization attached to this
# object, then return the collected permissions
if object_org not in user_obj.get_all_organizations():
return self._create_permission_set(perms)
# The user is in the organization that owns this object, so collect
# all of the permissions this user has for this organization
roles = user_obj.roles.filter(organization=object_org)
perms = perms | Permission.objects.filter(role__in=roles)
return self._create_permission_set(perms)
def get_all_permissions(self, user_obj, obj=None):
if user_obj.is_anonymous():
return set()
# we don't support user permissions
return self.get_group_permissions(user_obj, obj=obj)
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label, obj=None):
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj, obj=obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
return OrganizationUser.objects.get(pk=user_id)
except OrganizationUser.DoesNotExist:
return None
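# To activate this backend, list it in AUTHENTICATION_BACKENDS (standard Django
# configuration; the dotted path below assumes the app is importable as `organizations`):
#     AUTHENTICATION_BACKENDS = ('organizations.backends.OrganizationBackend',)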
|
avidal/django-organizations
|
organizations/backends.py
|
backends.py
|
py
| 6,718 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.backends.ModelBackend",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Organization.objects.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Organization.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "models.Organization",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.Organization.DoesNotExist",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.Organization",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "models.OrganizationUser.objects.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.OrganizationUser.objects",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "models.OrganizationUser",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "models.OrganizationUser.DoesNotExist",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "models.OrganizationUser",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.Permission.objects.all",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Permission.objects",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.Permission",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "models.OrganizationUser",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "django.contrib.auth.models.Permission.objects.filter",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Permission.objects",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.Permission",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "models.Organization",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "django.contrib.auth.models.Permission.objects.filter",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Permission.objects",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.Permission",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.Permission.objects.filter",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Permission.objects",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.Permission",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "models.OrganizationUser.objects.get",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "models.OrganizationUser.objects",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "models.OrganizationUser",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "models.OrganizationUser.DoesNotExist",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "models.OrganizationUser",
"line_number": 171,
"usage_type": "name"
}
] |
70384349309
|
import numpy as np
import scipy
import cv2
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
def rotate_and_crop(arr, ang):
"""Array arr to be rotated by ang degrees and cropped afterwards"""
arr_rot = scipy.ndimage.rotate(arr, ang, reshape=True, order=0)
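    # Estimate the width of the empty wedges the rotation introduces along each edge
    # (a small-angle approximation) so they can be cropped away below.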
shift_up = np.ceil(np.arcsin(abs(ang) / 360 * 2 * np.pi) * arr.shape[1])
shift_right = np.ceil(np.arcsin(abs(ang) / 360 * 2 * np.pi) * arr.shape[0])
arr_crop = arr_rot[
int(shift_up) : arr_rot.shape[0] - int(shift_up),
int(shift_right) : arr_rot.shape[1] - int(shift_right),
]
return arr_crop
def contourf_to_array(cs, nbpixels_x, nbpixels_y, scale_x, scale_y):
"""Draws filled contours from contourf or tricontourf cs on output array of size (nbpixels_x, nbpixels_y)"""
image = np.zeros((nbpixels_x, nbpixels_y)) - 5
for i, collection in enumerate(cs.collections):
z = cs.levels[i] # get contour levels from cs
for path in collection.get_paths():
verts = (
path.to_polygons()
) # get vertices of current contour level (is a list of arrays)
for v in verts:
# rescale vertices to image size
v[:, 0] = (
(v[:, 0] - np.min(scale_x))
/ (np.max(scale_x) - np.min(scale_x))
* nbpixels_y
)
v[:, 1] = (
(v[:, 1] - np.min(scale_y))
/ (np.max(scale_y) - np.min(scale_y))
* nbpixels_x
)
poly = np.array(
[v], dtype=np.int32
) # dtype integer is necessary for the next instruction
cv2.fillPoly(image, poly, z)
return image
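# Note: iterating over `cs.collections` follows the pre-3.8 Matplotlib API;
# `ContourSet.collections` is deprecated in newer releases, where the contour paths
# can be read from the ContourSet itself instead.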
def contourf_to_array_3d(cs, nbpixels_x, nbpixels_y, scale_x, scale_y, levels):
res = np.zeros((nbpixels_x, nbpixels_y, cs.shape[-1])) - 5
for i in range(res.shape[-1]):
cf = plt.contourf(scale_x, scale_y, cs[:, :, i], levels=levels)
res[:, :, i] = np.flip(
contourf_to_array(cf, nbpixels_x, nbpixels_y, scale_x, scale_y), axis=0
)
res[:, :, i][np.where(res[:, :, i] < -4)] = np.nan
plt.close("all")
return res
def create_bounds():
bounds_k = {
'x': {
'min': 137089.2373932857299224,
'max': 168249.9520578108495101,
},
'y': {
'min': 589482.3877100050449371,
'max': 610702.8749795859912410,
},
}
bounds_g = {
'x': {
'min': 129971.5049754020292312,
'max': 170784.9834783510013949,
},
'y': {
'min': 584191.5390384565107524,
'max': 611985.5710535547696054,
},
}
for key, value in bounds_k.items():
bounds_k[key]['delta'] = ((bounds_k[key]['max'] - bounds_k[key]['min']) / (8.94 / 10.22) - (bounds_k[key]['max'] - bounds_k[key]['min'])) / 2
bounds_g[key]['delta'] = ((bounds_g[key]['max'] - bounds_g[key]['min']) / (8.94 / 10.22) - (bounds_g[key]['max'] - bounds_g[key]['min'])) / 2
bounds_k[key]['min'] -= bounds_k[key]['delta']
bounds_k[key]['max'] += bounds_k[key]['delta']
bounds_g[key]['min'] -= bounds_g[key]['delta']
bounds_g[key]['max'] += bounds_g[key]['delta']
return bounds_k, bounds_g
def get_bathymetry_extent(ds, bounds):
print(np.where(ds.x.values >= bounds['x']['min'])[0].min())
x_index_min = np.where(ds.x.values >= bounds['x']['min'])[0].min() - 1
x_index_max = np.where(ds.x.values >= bounds['x']['max'])[0].min()
y_index_min = np.where(ds.y.values >= bounds['y']['min'])[0].max() + 1
y_index_max = np.where(ds.y.values >= bounds['y']['max'])[0].max()
extent = (x_index_min, x_index_max, y_index_min, y_index_max)
return extent
def prep_bathymetry_data(ds, extent):
x_b, y_b = np.array(ds.x[extent[0]:extent[1]]), np.array(ds.y[extent[3]:extent[2]])
bodem = np.array(ds[0, extent[3]:extent[2], extent[0]:extent[1]])
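    # -9999 is the grid's nodata marker; replace it with a fixed fallback depth
    # (the -43.8 value comes from the original source and is assumed to be metres below datum).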
bodem[np.where(bodem == -9999)] = -43.8
return x_b, y_b, bodem
def get_conc_extent(ds, x_b, y_b):
x_min = np.where(x_b >= ds.x.values.min())[0].min() - 1
x_max = np.where(x_b <= ds.x.values.max())[0].max() + 1
y_min = np.where(y_b >= ds.y.values.min())[0].max() + 1
y_max = np.where(y_b <= ds.y.values.max())[0].min() - 1
extent = (x_min, x_max, y_min, y_max)
return extent
def rescale_and_fit_ds(ds, ds_bounds, rescale_size1, rescale_size2, axis=0):
"""
Rescales dataset ds to fit over the satellite image with size rescale_size2
It also fits the dataset values such that the x and y bounds of the dataset are
placed on the right positions over the satellite image
Input:
ds - dataset to rescale and fit
ds_bounds - indices of the bounds of the bathymetry, corresponding to the ds bounds
rescale_size1 - shape of the bathymetry
rescale_size2 - shape of the satellite image
axis - axis of ds over which we want to rescale and fit
Output:
ds_rescaled - dataset with known values on the right positions over the satellite and
nan's everywhere else
"""
xmin, ymin = ds_bounds[0]
xmax, ymax = ds_bounds[1]
ds_sub = np.zeros(rescale_size1)
ds_sub[:] = np.nan
for i in range(ds.shape[axis]):
ds_inter = cv2.resize(ds[i, :, :], dsize=(xmax - xmin, ymin - ymax), interpolation=cv2.INTER_CUBIC)
ds_sub[ymax:ymin, xmin:xmax] = ds_inter
ds_sub2 = np.expand_dims(cv2.resize(ds_sub, dsize=(rescale_size2[1], rescale_size2[0]), interpolation=cv2.INTER_CUBIC), axis=axis)
if i == 0:
ds_rescaled = ds_sub2
else:
ds_rescaled = np.concatenate([ds_rescaled, ds_sub2], axis=axis)
return ds_rescaled
def create_shaded_image(sat, bodem, shape):
ls = LightSource(azdeg=315, altdeg=45)
# Create shade using lightsource
rgb = ls.hillshade(bodem, vert_exag=5, dx=20, dy=20)
# Scale satellite image to bathymetry shapes
sat_scaled = cv2.resize(sat, dsize=(bodem.shape[1], bodem.shape[0]), interpolation=cv2.INTER_CUBIC).astype('float64')
# Add shade to scaled image
img_shade = ls.shade_rgb(sat_scaled, bodem, vert_exag=5, blend_mode='soft')
img_shade = cv2.resize(img_shade, dsize=shape, interpolation=cv2.INTER_CUBIC).astype('float64')
return img_shade
|
openearth/vcl
|
vcl/data.py
|
data.py
|
py
| 6,478 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "scipy.ndimage.rotate",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.ceil",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.ceil",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "cv2.fillPoly",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.flip",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.where",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "numpy.expand_dims",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.LightSource",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 158,
"usage_type": "attribute"
}
] |
29476131174
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Fine tune CTC network (CSJ corpus, for dialog)."""
import os
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
import tensorflow.contrib.slim as slim
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
from data.read_dataset_ctc import DataSet
from data.read_dataset_ctc_dialog import DataSet as DataSetDialog
from models.ctc.load_model import load
from evaluation.eval_ctc import do_eval_per, do_eval_cer
from evaluation.eval_ctc_dialog import do_eval_fmeasure
from utils.data.sparsetensor import list2sparsetensor, sparsetensor2list
from utils.util import mkdir, join
from utils.parameter import count_total_parameters
from utils.loss import save_loss
from utils.labels.phone import num2phone
from utils.labels.character import num2char
def do_fine_tune(network, optimizer, learning_rate, batch_size, epoch_num,
label_type, num_stack, num_skip, social_signal_type,
trained_model_path, restore_epoch=None):
"""Run training.
Args:
network: network to train
optimizer: adam or adadelta or rmsprop
learning_rate: initial learning rate
batch_size: size of mini batch
epoch_num: epoch num to train
label_type: phone or character
num_stack: int, the number of frames to stack
num_skip: int, the number of frames to skip
social_signal_type: insert or insert2 or insert3 or remove
trained_model_path: path to the pre-trained model
restore_epoch: epoch of the model to restore
"""
# Tell TensorFlow that the model will be built into the default graph
with tf.Graph().as_default():
# Read dataset
train_data = DataSetDialog(data_type='train', label_type=label_type,
social_signal_type=social_signal_type,
num_stack=num_stack, num_skip=num_skip,
is_sorted=True)
dev_data = DataSetDialog(data_type='dev', label_type=label_type,
social_signal_type=social_signal_type,
num_stack=num_stack, num_skip=num_skip,
is_sorted=False)
test_data = DataSetDialog(data_type='test', label_type=label_type,
social_signal_type=social_signal_type,
num_stack=num_stack, num_skip=num_skip,
is_sorted=False)
        # TODO: create these evaluation datasets
# eval1_data = DataSet(data_type='eval1', label_type=label_type,
# social_signal_type=social_signal_type,
# num_stack=num_stack, num_skip=num_skip,
# is_sorted=False)
# eval2_data = DataSet(data_type='eval2', label_type=label_type,
# social_signal_type=social_signal_type,
# num_stack=num_stack, num_skip=num_skip,
# is_sorted=False)
# eval3_data = DataSet(data_type='eval3', label_type=label_type,
# social_signal_type=social_signal_type,
# num_stack=num_stack, num_skip=num_skip,
# is_sorted=False)
# Add to the graph each operation
loss_op = network.loss()
train_op = network.train(optimizer=optimizer,
learning_rate_init=learning_rate,
is_scheduled=False)
decode_op = network.decoder(decode_type='beam_search',
beam_width=20)
per_op = network.ler(decode_op)
# Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train = tf.summary.merge(network.summaries_train)
summary_dev = tf.summary.merge(network.summaries_dev)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Create a saver for writing training checkpoints
saver = tf.train.Saver(max_to_keep=None)
# Count total parameters
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M parameters" %
(len(parameters_dict.keys()), "{:,}".format(total_parameters / 1000000)))
csv_steps = []
csv_train_loss = []
csv_dev_loss = []
# Create a session for running operation on the graph
with tf.Session() as sess:
# Instantiate a SummaryWriter to output summaries and the graph
summary_writer = tf.summary.FileWriter(
network.model_dir, sess.graph)
# Initialize parameters
sess.run(init_op)
# Restore pre-trained model's parameters
ckpt = tf.train.get_checkpoint_state(trained_model_path)
if ckpt:
# Use last saved model
model_path = ckpt.model_checkpoint_path
if restore_epoch is not None:
model_path = model_path.split('/')[:-1]
model_path = '/'.join(model_path) + \
'/model.ckpt-' + str(restore_epoch)
else:
raise ValueError('There are not any checkpoints.')
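            # Restore all pre-trained weights except the output layer, whose shape
            # depends on the label set and therefore differs between the source task
            # and this fine-tuning target.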
exclude = ['output/Variable', 'output/Variable_1']
variables_to_restore = slim.get_variables_to_restore(
exclude=exclude)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, model_path)
print("Model restored: " + model_path)
# Train model
iter_per_epoch = int(train_data.data_num / batch_size)
if (train_data.data_num / batch_size) != int(train_data.data_num / batch_size):
iter_per_epoch += 1
max_steps = iter_per_epoch * epoch_num
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
fmean_best = 0
for step in range(max_steps):
# Create feed dictionary for next mini batch (train)
inputs, labels, seq_len, _ = train_data.next_batch(
batch_size=batch_size)
indices, values, dense_shape = list2sparsetensor(labels)
feed_dict_train = {
network.inputs_pl: inputs,
network.label_indices_pl: indices,
network.label_values_pl: values,
network.label_shape_pl: dense_shape,
network.seq_len_pl: seq_len,
network.keep_prob_input_pl: network.dropout_ratio_input,
network.keep_prob_hidden_pl: network.dropout_ratio_hidden,
network.lr_pl: learning_rate
}
# Create feed dictionary for next mini batch (dev)
inputs, labels, seq_len, _ = dev_data.next_batch(
batch_size=batch_size)
indices, values, dense_shape = list2sparsetensor(labels)
feed_dict_dev = {
network.inputs_pl: inputs,
network.label_indices_pl: indices,
network.label_values_pl: values,
network.label_shape_pl: dense_shape,
network.seq_len_pl: seq_len,
network.keep_prob_input_pl: network.dropout_ratio_input,
network.keep_prob_hidden_pl: network.dropout_ratio_hidden
}
# Update parameters & compute loss
_, loss_train = sess.run(
[train_op, loss_op], feed_dict=feed_dict_train)
loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
csv_steps.append(step)
csv_train_loss.append(loss_train)
csv_dev_loss.append(loss_dev)
if (step + 1) % 10 == 0:
# Change feed dict for evaluation
feed_dict_train[network.keep_prob_input_pl] = 1.0
feed_dict_train[network.keep_prob_hidden_pl] = 1.0
feed_dict_dev[network.keep_prob_input_pl] = 1.0
feed_dict_dev[network.keep_prob_hidden_pl] = 1.0
# Compute accuracy & \update event file
ler_train, summary_str_train = sess.run([per_op, summary_train],
feed_dict=feed_dict_train)
ler_dev, summary_str_dev, labels_st = sess.run([per_op, summary_dev, decode_op],
feed_dict=feed_dict_dev)
summary_writer.add_summary(summary_str_train, step + 1)
summary_writer.add_summary(summary_str_dev, step + 1)
summary_writer.flush()
# Decode
# try:
# labels_pred = sparsetensor2list(labels_st, batch_size)
# except:
# labels_pred = [[0] * batch_size]
duration_step = time.time() - start_time_step
print('Step %d: loss = %.3f (%.3f) / ler = %.4f (%.4f) (%.3f min)' %
(step + 1, loss_train, loss_dev, ler_train, ler_dev, duration_step / 60))
# if label_type == 'character':
# if social_signal_type == 'remove':
# map_file_path = '../evaluation/mapping_files/ctc/char2num_remove.txt'
# else:
# map_file_path = '../evaluation/mapping_files/ctc/char2num_' + \
# social_signal_type + '.txt'
# print('True: %s' % num2char(labels[-1], map_file_path))
# print('Pred: %s' % num2char(
# labels_pred[-1], map_file_path))
# elif label_type == 'phone':
# if social_signal_type == 'remove':
# map_file_path = '../evaluation/mapping_files/ctc/phone2num_remove.txt'
# else:
# map_file_path = '../evaluation/mapping_files/ctc/phone2num_' + \
# social_signal_type + '.txt'
# print('True: %s' % num2phone(
# labels[-1], map_file_path))
# print('Pred: %s' % num2phone(
# labels_pred[-1], map_file_path))
sys.stdout.flush()
start_time_step = time.time()
# Save checkpoint and evaluate model per epoch
if (step + 1) % iter_per_epoch == 0 or (step + 1) == max_steps:
duration_epoch = time.time() - start_time_epoch
epoch = (step + 1) // iter_per_epoch
print('-----EPOCH:%d (%.3f min)-----' %
(epoch, duration_epoch / 60))
# Save model (check point)
checkpoint_file = os.path.join(
network.model_dir, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=epoch)
print("Model saved in file: %s" % save_path)
start_time_eval = time.time()
if label_type == 'character':
print('■Dev Evaluation:■')
fmean_epoch = do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=dev_data,
label_type=label_type,
social_signal_type=social_signal_type)
# error_epoch = do_eval_cer(session=sess,
# decode_op=decode_op,
# network=network,
# dataset=dev_data,
# eval_batch_size=batch_size)
if fmean_epoch > fmean_best:
fmean_best = fmean_epoch
print('■■■ ↑Best Score (F-measure)↑ ■■■')
do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=test_data,
label_type=label_type,
social_signal_type=social_signal_type)
# print('■eval1 Evaluation:■')
# do_eval_cer(session=sess, decode_op=decode_op,
# network=network, dataset=eval1_data,
# eval_batch_size=batch_size)
# print('■eval2 Evaluation:■')
# do_eval_cer(session=sess, decode_op=decode_op,
# network=network, dataset=eval2_data,
# eval_batch_size=batch_size)
# print('■eval3 Evaluation:■')
# do_eval_cer(session=sess, decode_op=decode_op,
# network=network, dataset=eval3_data,
# eval_batch_size=batch_size)
else:
print('■Dev Evaluation:■')
fmean_epoch = do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=dev_data,
label_type=label_type,
social_signal_type=social_signal_type)
# error_epoch = do_eval_per(session=sess,
# per_op=per_op,
# network=network,
# dataset=dev_data,
# eval_batch_size=batch_size)
if fmean_epoch < fmean_best:
fmean_best = fmean_epoch
print('■■■ ↑Best Score (F-measure)↑ ■■■')
do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=test_data,
label_type=label_type,
social_signal_type=social_signal_type)
# print('■eval1 Evaluation:■')
# do_eval_per(session=sess, per_op=per_op,
# network=network, dataset=eval1_data,
# eval_batch_size=batch_size)
# print('■eval2 Evaluation:■')
# do_eval_per(session=sess, per_op=per_op,
# network=network, dataset=eval2_data,
# eval_batch_size=batch_size)
# print('■eval3 Evaluation:■')
# do_eval_per(session=sess, per_op=per_op,
# network=network, dataset=eval3_data,
# eval_batch_size=batch_size)
duration_eval = time.time() - start_time_eval
print('Evaluation time: %.3f min' %
(duration_eval / 60))
start_time_epoch = time.time()
start_time_step = time.time()
duration_train = time.time() - start_time_train
print('Total time: %.3f hour' % (duration_train / 3600))
# Save train & dev loss
save_loss(csv_steps, csv_train_loss, csv_dev_loss,
save_path=network.model_dir)
# Training was finished correctly
with open(os.path.join(network.model_dir, 'complete.txt'), 'w') as f:
f.write('')
def main(config_path, trained_model_path):
restore_epoch = None # if None, restore the final epoch
# Read a config file (.yml)
with open(config_path, "r") as f:
config = yaml.load(f)
corpus = config['corpus']
feature = config['feature']
param = config['param']
if corpus['label_type'] == 'phone':
if corpus['social_signal_type'] in ['insert', 'insert3']:
output_size = 41
elif corpus['social_signal_type'] == 'insert2':
output_size = 44
elif corpus['social_signal_type'] == 'remove':
output_size = 38
elif corpus['label_type'] == 'character':
if corpus['social_signal_type'] in ['insert', 'insert3']:
output_size = 150
elif corpus['social_signal_type'] == 'insert2':
output_size = 153
elif corpus['social_signal_type'] == 'remove':
output_size = 147
# Load model
CTCModel = load(model_type=config['model_name'])
network = CTCModel(batch_size=param['batch_size'],
input_size=feature['input_size'] * feature['num_stack'],
num_cell=param['num_cell'],
num_layer=param['num_layer'],
output_size=output_size,
clip_gradients=param['clip_grad'],
clip_activation=param['clip_activation'],
dropout_ratio_input=param['dropout_input'],
dropout_ratio_hidden=param['dropout_hidden'],
num_proj=param['num_proj'],
weight_decay=param['weight_decay'])
network.model_name = config['model_name'].upper()
network.model_name += '_' + str(param['num_cell'])
network.model_name += '_' + str(param['num_layer'])
network.model_name += '_' + param['optimizer']
network.model_name += '_lr' + str(param['learning_rate'])
if feature['num_stack'] != 1:
network.model_name += '_stack' + str(feature['num_stack'])
network.model_name += '_transfer_' + corpus['transfer_data_size']
# Set save path
network.model_dir = mkdir('/n/sd8/inaguma/result/csj/dialog/')
network.model_dir = join(network.model_dir, 'ctc')
network.model_dir = join(network.model_dir, corpus['label_type'])
network.model_dir = join(network.model_dir, corpus['social_signal_type'])
network.model_dir = join(network.model_dir, network.model_name)
# Reset model directory
if not os.path.isfile(os.path.join(network.model_dir, 'complete.txt')):
tf.gfile.DeleteRecursively(network.model_dir)
tf.gfile.MakeDirs(network.model_dir)
else:
raise ValueError('File exists.')
# Set process name
setproctitle('ctc_csj_dialog_' + corpus['label_type'] + '_' +
param['optimizer'] + '_' + corpus['social_signal_type'] +
'_transfer_' + corpus['transfer_data_size'])
# Save config file
shutil.copyfile(config_path, os.path.join(network.model_dir, 'config.yml'))
sys.stdout = open(os.path.join(network.model_dir, 'train.log'), 'w')
print(network.model_name)
do_fine_tune(network=network,
optimizer=param['optimizer'],
learning_rate=param['learning_rate'],
batch_size=param['batch_size'],
epoch_num=param['num_epoch'],
label_type=corpus['label_type'],
num_stack=feature['num_stack'],
num_skip=feature['num_skip'],
social_signal_type=corpus['social_signal_type'],
trained_model_path=trained_model_path,
restore_epoch=restore_epoch)
sys.stdout = sys.__stdout__
if __name__ == '__main__':
args = sys.argv
if len(args) != 3:
        raise ValueError(
            'Usage: python fine_tune_ctc.py path_to_config path_to_trained_model')
main(config_path=args[1], trained_model_path=args[2])
|
hirofumi0810/tensorflow_end2end_speech_recognition
|
examples/csj/fine_tuning/finetune_ctc_dialog.py
|
finetune_ctc_dialog.py
|
py
| 20,860 |
python
|
en
|
code
| 312 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.Graph",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "data.read_dataset_ctc_dialog.DataSet",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "data.read_dataset_ctc_dialog.DataSet",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "data.read_dataset_ctc_dialog.DataSet",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.merge",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.summary.merge",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "utils.parameter.count_total_parameters",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.trainable_variables",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.get_checkpoint_state",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.contrib.slim.get_variables_to_restore",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.slim",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "utils.data.sparsetensor.list2sparsetensor",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "utils.data.sparsetensor.list2sparsetensor",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "sys.stdout.flush",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "evaluation.eval_ctc_dialog.do_eval_fmeasure",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "evaluation.eval_ctc_dialog.do_eval_fmeasure",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "evaluation.eval_ctc_dialog.do_eval_fmeasure",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "evaluation.eval_ctc_dialog.do_eval_fmeasure",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "utils.loss.save_loss",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "models.ctc.load_model.load",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "utils.util.mkdir",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "utils.util.join",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "utils.util.join",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "utils.util.join",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "utils.util.join",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile.DeleteRecursively",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile",
"line_number": 390,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.gfile.MakeDirs",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "setproctitle.setproctitle",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "shutil.copyfile",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 401,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 403,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 403,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "sys.__stdout__",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 421,
"usage_type": "attribute"
}
] |
43133597312
|
# This script is developed by Team 11 CIS 3760
# Parser - to parse the plain text file into json file
import sys
import re
import json
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import course_util
uOCourseCodeIsolatedPat = re.compile(r'^\w{3}[ ]?\d{4}$')
uOCourseCodePat = re.compile(r'\w{3}[ ]?\d{4}')
uOOneOfCondPat = re.compile(r'One of .+', re.IGNORECASE)
uOAntiReqPat = re.compile(r'The courses? (\w{3}\w?[ ]?\d{4},?[ ]?)+', re.IGNORECASE)
uOCarlAltPat = re.compile(r'\w{3}[ ]?\d{4} \(\w{4}[ ]?\d{4}(?:(?: or | ou | and | et )\w{4}[ ]?\d{4})+\)')
uOOrListPat = re.compile(r'\(\w{3,4}[ ]?\d{4}(?:(?: or | ou )\w{3,4}[ ]?\d{4})\)')
uOOrListNoBracketPat = re.compile(r'\w{3}[ ]?\d{4}(?:(?: or | ou )\w{3}[ ]?\d{4})+')
uOOrCondNoBracketIsoPat = re.compile(r'^\w{3,4}[ ]?\d{4}(?:(?: or | ou )\w{3}[ ]?\d{3,4})[.]?$')
uOEnglishCoursePat = re.compile(r'\w{3}[ ]?\d[1234]\d{2}')
uOAndListIsoPat = re.compile(r'^\w{3,4}[ ]?\d{4}(?:(?: and | et |, )\w{3,4}[ ]?\d{4})+$')
uOAndCondIsoPat = re.compile(r'^\w{3,4}[ ]?\d{4}(?:(?: and | et |, )\w{3,4}[ ]?\d{4})$')
uOAndCondPat = re.compile(r'\w{3,4}[ ]?\d{4}(?:(?: and | et |, )\w{3,4}[ ]?\d{4})')
uOAndOptionPat = re.compile(r'^\(?\w{3}[ ]?\d{4}(?:(?: and | et |, )\w{3}[ ]?\d{4})\)?$')
uOAndListPat = re.compile(r'^\(?\w{3}[ ]?\d{4}(?:(?: and | et |, )\w{3}[ ]?\d{4})+\)?$')
uONotCombineFRPat = re.compile(r'^Les cours \w{3}[ ]?\d{3,4}(?:(?: or | ou | and | et |, )\w{3,4}[ ]?\d{4})+ ne peuvent être combinés pour l\'obtention de crédits[.;]?', re.IGNORECASE)
uONotCombineENGPat = re.compile(r'^\w{3}[ ]?\d{3,4}(?:(?: or | ou | and | et |, )\w{3,4}[ ]?\d{4})+ cannot be combined for units[.;]?', re.IGNORECASE)
uOSingleThirdPartyOptPat = re.compile(r'^\w{3}[ ]?\d{4} \(\w{4}[ ]?\d{4}\)$')
uOThirdPartyCourseListPat = re.compile(r'\(?\w{4}[ ]?\d{4}(?:(?: or | ou | and | et |, )\w{4}[ ]?\d{4})\)?')
uOCreditPrereqENGPat = re.compile(r'^\d+ university units[.;]?$')
uOCreditPrereqFRPat = re.compile(r'^\d+ crédits universitaires[.;]?$')
# splits string by commas not appearing within () or []
stringListSplitterPat = re.compile(r'(?=,\s*(?![^\[\]\[\]]*[\]\]]))(?=,\s*(?![^()\[\]]*[\)\]]))')
def splitCourseOrCond(raw : str, pattern, coursePat=None) -> list:
courseOrList = []
splitOrCond = re.split(pattern, raw)
for courseCode in splitOrCond:
# remove any parenthesis
courseCode = courseCode.replace('(', '')
courseCode = courseCode.replace(')', '')
if coursePat:
            if re.search(coursePat, courseCode):
courseOrList.append(courseCode.strip())
else:
courseOrList.append(courseCode.strip())
return courseOrList
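# Example (illustration): splitCourseOrCond('CSI 3105 or MAT 1348', r' or | ou ')
# returns ['CSI 3105', 'MAT 1348'].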
def splitOnPeriod(string : str):
splitList = re.split(r'\.', string)
return splitList
def splitOnComma(string : str):
newList = re.split(stringListSplitterPat, string)
for i, item in enumerate(newList):
if item.startswith(', '):
newList[i] = item[2:]
if item == '':
newList.pop(i)
return newList
def normalizeUnicodeString(string : str) -> str:
string = string.replace(u'\xa0', ' ')
# item = unicodedata.normalize('NFKC', item)
string = string.strip()
string = string.replace('Préalable : ', '')
string = string.replace('Préalables : ', '')
string = string.replace('Prerequisite: ', '')
string = string.replace('Prerequisites: ', '')
return string
def parseUOttawaPrereqElm(prereqString : str):
prereqList = []
splitList = splitOnPeriod(prereqString)
# print('splitList:', splitList)
for item in splitList:
item = normalizeUnicodeString(item)
if item == '' or re.search(uOAntiReqPat, item) or re.match(uONotCombineENGPat, item) \
or re.match(uONotCombineFRPat, item):
continue
# Case where or condition list in the form 'One of Calculus and Vectors (MCV4U) or MAT 1339'
if re.match(uOOneOfCondPat, item):
temp = item[7:]
orList = splitCourseOrCond(temp, r' or | ou ')
newList = []
numCourses = len(orList)
for newElem in orList:
newList.append(parseUOttawaPrereqElm(newElem))
prereqList.append((numCourses, newList))
commaSplitList = splitOnComma(item)
for element in commaSplitList:
# Single isolated couse code
if re.match(uOCourseCodeIsolatedPat, element):
prereqList.append(element)
# Message regarding 'cannot be combined' -- ignore case
elif re.match(uONotCombineFRPat, element) or re.match(uONotCombineENGPat, element):
continue
            # permission required case (2 of several) -- rest ignored
elif re.match(r'Permission de l\'Institut', element) \
or re.match(r'permission of the instructor', element):
prereqList.append(element)
elif re.match(uOCreditPrereqENGPat, element) or re.match(uOCreditPrereqFRPat, element):
prereqList.append(element)
elif re.search(r'This course is (?:primarily )?intended', element) \
or re.search(r'principalement aux étudiants', element) \
or re.search(r'cours est destiné', element) or re.search(r'cours ne peut pas', element)\
or re.search(r'an equivalent', element) \
or re.search(r'verify your program', element) \
or re.search(r'cannot count', element):
pass
# case of a list with third party courses
elif re.search(uOThirdPartyCourseListPat, element):
temp = re.split(uOThirdPartyCourseListPat, element)
for item in temp:
if re.search(r'\w{4}[ ]?\d{4}', item):
pass
else:
prereqList.append(parseUOttawaPrereqElm(item))
# case where single course code has third party alternative
elif re.match(uOSingleThirdPartyOptPat, element):
newCourse = re.search(uOCourseCodePat, element).group()
prereqList.append(newCourse)
# single or condition without brackets
elif re.match(uOOrCondNoBracketIsoPat, element):
orList = splitCourseOrCond(element, r' or | ou ')
tempList = []
for item in orList:
if re.match(uOCourseCodeIsolatedPat, item):
tempList.append(item)
# case where something other than a course code is part of an OR group
# which then becomes of length 1
if len(tempList) == 1:
prereqList.append(tempList[0])
elif len(tempList) > 1:
prereqList.append(tempList)
# and list, possibly multiple
elif re.match(uOAndListIsoPat, element):
# single and condition (two courses)
if re.match(uOAndCondIsoPat, element):
andList = splitCourseOrCond(element, r' and | et |, ')
for item in andList:
if re.match(uOCourseCodeIsolatedPat, item):
prereqList.append(item)
# Ontario Highschool course code
elif re.search(r'[ \(][A-Z]{3}[ ]?\d[A-Z]', element):
newItem = re.search(r'[ \(][A-Z]{3}[ ]?\d[A-Z]', element).group()
if newItem.startswith('(') or newItem.startswith(' '):
newItem = newItem[1:]
prereqList.append(newItem)
# check if brackets surrounding text exist
elif re.search(r'\(.+\)', element):
#case where there is an OR list
if re.match(uOOrListPat, element):
prereqList.append(splitCourseOrCond(element, r' or | ou ', uOCourseCodePat))
# check if a uOttawa course code exists
elif re.search(uOCourseCodePat, element):
#split by commas outside of brackets
bracketSplitList = re.split(stringListSplitterPat, element)
tempList = []
for item in bracketSplitList:
# filter out split prereqs starting with 'or'
if re.search(r' or | ou ', item):
if item.startswith('or'):
pass
splitList = splitCourseOrCond(item, r' or | ou ', uOCourseCodePat)
for ele in splitList:
tempList.append(parseUOttawaPrereqElm(ele))
# filter out coreq cases
elif re.search(r'coreq', item) or re.search(r'concomitant', item):
pass
# if starting with a uOttawa course code, add it
elif re.match(r'^[a-zA-Z]{3}[ ]?\d{4}', item):
prereqList.append(re.match(r'^[a-zA-Z]{3}[ ]?\d{4}', item).group())
prereqList.append(tempList)
# filter everything else
else:
pass
return prereqList
def parseUOttawaPrereqs(courseData : list):
for i, element in enumerate(courseData):
# print('subject:', element['course_name'])
prereqString = element['prereqs']
# print('prereqstring:', prereqString)
if len(prereqString) == 0:
continue
prereqList = []
for ele in prereqString:
# if it is not an english course split and discard english translations if they exist
# potentially breaking if the ` / ` sequence exists for a different purpose
if not re.match(uOEnglishCoursePat, element['course_num']):
ele = ele.split(' / ', 2)[0]
prereq = parseUOttawaPrereqElm(ele)
if len(prereq) == 1 and prereq[0] == '':
pass
else:
if isinstance(prereq, list):
for item in prereq:
prereqList.append(item)
else:
prereqList.append(prereq)
#
#TODO: convert prereq to new object as per #115
#print(newPrereq)
#
courseData[i]['prereqs'] = prereqList
##################################
if __name__ == "__main__":
if not len(sys.argv) == 3:
exit('Invalid Arguments')
fileIn = sys.argv[1] # plain text file
fileOut = sys.argv[2] # output JSON file
tempList = []
courseDataList = course_util.get_courses(fileIn)
parseUOttawaPrereqs(courseDataList)
json_object = json.dumps(courseDataList, indent=2, ensure_ascii=False) # dumping into JSON
# Writing to sample.json
with open(fileOut, "w") as outfile: # writing to JSON file
outfile.truncate(0)
outfile.write(json_object)
|
jessendasilva1/UniveristySearch
|
graphing/util/parser/ottawaCourseParser.py
|
ottawaCourseParser.py
|
py
| 11,463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "course_util.get_courses",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 252,
"usage_type": "call"
}
] |
41211802240
|
#import cv2
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
import os
import sys
import json
from PIL import Image
# import Image
import requests
from io import BytesIO
import urllib3
import h5py
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# y_test = []
# Compute the similarity matrix
def cosine_similarity(ratings):
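    # Pairwise cosine similarity: dot products of the feature rows, normalized by each row's norm.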
sim = ratings.dot(ratings.T)
if not isinstance(sim, np.ndarray):
sim = sim.toarray()
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
def saveVector(vector): ## don't use, just use it in colab to get the vector
save_file = h5py.File('../test.h5', 'w')
save_file.create_dataset('test', data=vector)
save_file.close()
def readvector():
open_file = h5py.File('/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/vgg16_p/test.h5', 'r')
vector = open_file['test'][:]
open_file.close()
return vector
def getTitleCheck_VGG():
a = np.load('/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/vgg16_p/title.npy', allow_pickle= True)
return a.item()
def compare():
# y_test = []
model = VGG16(weights='imagenet', include_top=False)
    # Load the sample image
image_sample = Image.open("/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/frontend/static/frontend/images/temp.jpg")
imageS = image_sample.crop()
thisImage = imageS.resize((224, 224))
my_image = image.img_to_array(thisImage)
my_x = np.expand_dims(my_image, axis=0)
my_x = preprocess_input(my_x)
my_features = model.predict(my_x)
my_features_compress = my_features.reshape(1, 7 * 7 * 512)
# features_compress.append(my_features_compress)
features_compress = readvector()
# print(np.shape(features_compress))
# print(np.shape(my_features_compress))
new_features = np.append(features_compress, my_features_compress, axis=0)
# print(np.shape(new_features))
# exit(0)
sim = cosine_similarity(new_features)
# print("sim:", np.shape(sim))
    # # Take 1 sample for testing according to the command-line argument
# inputNo = int(sys.argv[1]) # tiger, np.random.randint(0,len(y_test),1)[0]
# sample = y_test[inputNo]
# print(sample)
top = np.argsort(-sim[-1, :], axis=0)[1:3]
    # Get the indices of the top-2 most similar items
y_test = getTitleCheck_VGG()
recommend = [y_test[i] for i in top]
print(recommend)
# print(sim)
def compare_melanger():
# y_test = []
model = VGG16(weights='imagenet', include_top=False)
    # Load the sample image
    image_sample = Image.open("/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/frontend/static/frontend/images/temp.jpg") # update this path as needed
imageS = image_sample.crop()
thisImage = imageS.resize((224, 224))
my_image = image.img_to_array(thisImage)
my_x = np.expand_dims(my_image, axis=0)
my_x = preprocess_input(my_x)
my_features = model.predict(my_x)
my_features_compress = my_features.reshape(1, 7 * 7 * 512)
# features_compress.append(my_features_compress)
features_compress = readvector()
# print(np.shape(features_compress))
# print(np.shape(my_features_compress))
new_features = np.append(features_compress, my_features_compress, axis=0)
# print(np.shape(new_features))
# exit(0)
sim = cosine_similarity(new_features)
return sim
def main():
    # Find all JPEG files in the images directory
y_test = []
x_test = []
# x_test_final = []
# FILE_PATH = "/Users/panda/Desktop/movie_1202"
FILE_PATH = "/Users/panda/Downloads/archive/movies/movies"
IMAGE_BASE_PATH = "https://image.tmdb.org/t/p/w500"
flag = 0
# read file which is in the id_list
open_file = h5py.File('./id_list.h5', 'r')
id = open_file['id'][:]
open_file.close()
tmp = []
for i in range(len(id)):
tmp.append(int(id[i].decode('UTF-8')))
# print(tmp)
for movie in os.listdir(FILE_PATH):
# if flag < 50:
# flag += 1
# if flag == 245 or flag == 246 or flag == 247 or flag == 248:
# print(movie)
# else:
# continue
if movie.split(".")[1] != "json":
continue
movie_id = int(movie.split('_')[1].split('.')[0])
if movie_id in tmp:
# print(movie_id)
# open file
fr = open(FILE_PATH + "/" + movie)
# print(movie)
# print(movie_id)
movie_model = json.load(fr)
fr.close()
if movie_model['poster_path']:
img_path = IMAGE_BASE_PATH + movie_model['poster_path']
html = requests.get(img_path, verify=False)
poster = Image.open(BytesIO(html.content))
poster_img = poster.crop()
# poster = html.content
# imgByteArr = BytesIO()
# poster.save(imgByteArr, format=poster.format)
# poster = imgByteArr.getvalue()
# poster_img.show()
# img = poster_img.resize((224, 224))
# img.show()
# exit(1)
if poster:
# img = image.load_img(poster_img, target_size=(224, 224))
img = poster_img.resize((224, 224))
# img.show()
y_test.append(movie_id)
x = image.img_to_array(img)
# print(movie_id)
# print(x[:,:,0])
# print(np.shape(x[:,:,0]))
# exit(0)
if np.shape(x)[2] == 1:
x = np.stack((x[:, :, 0],) * 3, axis=-1)
x = np.expand_dims(x, axis=0)
if len(x_test) > 0:
# print(1)
# print(np.shape(x_test))
# print(np.shape(x))
# exit(0)
x_test = np.concatenate((x_test, x))
else:
# print(2)
x_test = x
# flag = flag + 1
# else:
# if len(x_test_final) > 0:
# # print(np.shape(x_test))
# # print(np.shape(x))
# # exit(0)
# #x_test = preprocess_input(x_test)
# x_test_final = np.concatenate((x_test_final, x_test))
# else:
# # x_test = preprocess_input(x_test)
# x_test_final = x_test
# x_test = []
# flag = 0
# x_test_final = np.concatenate((x_test_final, x_test))
    # Convert to the VGG input format
# print(x_test)
# print(type(x_test))
# print(np.shape(x_test))
x_test = preprocess_input(x_test)
# print(np.shape(x_test_final))
np.save("title.npy", y_test)
    # include_top=False loads the VGG16 model without the top 3 layers, typically to obtain features of shape (1,7,7,512)
model = VGG16(weights='imagenet', include_top=False)
    # Extract features
features = model.predict(x_test)
# print(np.shape(features))
    # Compute the similarity matrix
features_compress = features.reshape(len(y_test), 7 * 7 * 512)
# print(np.shape(features_compress))
# sim = cosine_similarity(features_compress)
saveVector(features_compress)
compare()
# # 取样本
# image_sample = Image.open("/Users/panda/Desktop/test_image/test.jpg")
# imageS = image_sample.crop()
# thisImage = imageS.resize((224, 224))
# my_image = image.img_to_array(thisImage)
# my_x = np.expand_dims(my_image, axis=0)
#
# my_x = preprocess_input(my_x)
#
# my_features = model.predict(my_x)
#
# my_features_compress = my_features.reshape(1, 7 * 7 * 512)
#
# # features_compress.append(my_features_compress)
#
# # print(np.shape(features_compress))
# # print(np.shape(my_features_compress))
# new_features = np.append(features_compress, my_features_compress, axis=0)
# # print(np.shape(new_features))
# # exit(0)
# sim = cosine_similarity(new_features)
# # print("sim:", np.shape(sim))
#
#
    # # # Take 1 sample for testing according to the command-line argument
# # inputNo = int(sys.argv[1]) # tiger, np.random.randint(0,len(y_test),1)[0]
# # sample = y_test[inputNo]
# # print(sample)
# top = np.argsort(-sim[-1,:], axis=0)[1:3]
#
    # # Get the indices of the top-2 most similar items
# recommend = [y_test[i] for i in top]
# print(recommend)
# #print(sim)
# if __name__ == "__main__":
# main()
print(getTitleCheck_VGG())
|
ming19956/PFE
|
information-retrival-search-engine/informationRetrival/vgg16_p/newvgg.py
|
newvgg.py
|
py
| 9,112 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "urllib3.disable_warnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.diagonal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.VGG16",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "numpy.expand_dims",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.preprocess_input",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.VGG16",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "numpy.expand_dims",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.preprocess_input",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "io.BytesIO",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.img_to_array",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "numpy.shape",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.preprocess_input",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "keras.applications.vgg16.VGG16",
"line_number": 219,
"usage_type": "call"
}
] |
12028632350
|
import tkinter
from tkinter import messagebox
from src.game.minesweeper import Minesweeper
from src.game.minesweeper import CellStatus
from src.game.minesweeper import GameStatus
from datetime import datetime
import platform
class MineweeperUI:
def __init__(self, root):
self.ui_window = root
self.ui_window.title("Minesweeper")
self.minesweeper = Minesweeper()
self.minesweeper.set_mines((int(round(datetime.now().timestamp() * 1000))))
self.cells= []
self.grid_init()
def grid_init(self):
right_click_type = self.button_os_config()
for row in range(0, 10):
self.cells.append([])
for column in range(0, 10):
cell = tkinter.Button(self.ui_window, text=" ", width=5, height=3,
command=lambda row=row, column=column, left_button = True: self.button_clicked(row, column, left_button))
cell.bind(right_click_type, lambda event, row=row, column=column, left_button = False: self.button_clicked(row, column, left_button))
cell.grid(row=row + 1, column=column)
self.cells[row].append(cell)
def button_os_config(self):
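        # Tk on macOS (Darwin) reports right-click as <Button-2>; other platforms use <Button-3>.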
if platform.system() == "Darwin":
return "<Button-2>"
else:
return "<Button-3>"
def button_clicked(self,row, column, left_button):
if left_button:
self.minesweeper.expose(row, column)
else:
self.minesweeper.toggle_seal(row, column)
self.update_grid()
if self.minesweeper.get_game_status() == GameStatus.LOST:
self.show_mines()
messagebox.showinfo("Game Over", "You have step on a mine! ")
if platform.system() == "Windows":
exit()
elif self.minesweeper.get_game_status() == GameStatus.WON:
messagebox.showinfo("Congratulations!", "You are a minesweeper master! ")
if platform.system() == "Windows":
exit()
def show_mines(self):
for row in range(10):
for column in range(10):
if self.minesweeper.is_mine_at(row,column):
self.cells[row][column]["text"] = "*"
if platform.system() == "Darwin":
self.cells[row][column].config(highlightbackground="red", highlightthickness=1)
else:
self.cells[row][column].config(background='red')
def update_grid(self):
for row in range(10):
for column in range(10):
if platform.system() == "Darwin":
if self.minesweeper.get_status(row,column) == CellStatus.EXPOSED:
adjacent_value = self.minesweeper.adjacent_mine_count(row, column)
if adjacent_value > 0: self.cells[row][column]["text"] = str(adjacent_value)
self.cells[row][column].config(highlightbackground="Yellow", highlightthickness=1)
elif self.minesweeper.get_status(row, column) == CellStatus.SEAL:
self.cells[row][column].config(highlightbackground="green", highlightthickness=1)
else:
self.cells[row][column].config(highlightbackground="#DCDCDC", highlightthickness=1)
else:
if self.minesweeper.get_status(row, column) == CellStatus.EXPOSED:
adjacent_value = self.minesweeper.adjacent_mine_count(row, column)
if adjacent_value > 0: self.cells[row][column]["text"] = str(adjacent_value)
self.cells[row][column].config(background='Yellow')
elif self.minesweeper.get_status(row, column) == CellStatus.SEAL:
self.cells[row][column].config(background='green')
else:
self.cells[row][column].config(background='#DCDCDC')
class Main:
def __init__(self):
root = tkinter.Tk()
MineweeperUI(root)
root.mainloop()
|
PriscillaRoy/MinesweeperGame
|
src/gui/minesweeper_ui.py
|
minesweeper_ui.py
|
py
| 3,636 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "src.game.minesweeper.Minesweeper",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tkinter.Button",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "src.game.minesweeper.GameStatus.LOST",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "src.game.minesweeper.GameStatus",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "platform.system",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "src.game.minesweeper.GameStatus.WON",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "src.game.minesweeper.GameStatus",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "platform.system",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "src.game.minesweeper.CellStatus.EXPOSED",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "src.game.minesweeper.CellStatus",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "src.game.minesweeper.CellStatus.SEAL",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "src.game.minesweeper.CellStatus",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "src.game.minesweeper.CellStatus.EXPOSED",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "src.game.minesweeper.CellStatus",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "src.game.minesweeper.CellStatus.SEAL",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "src.game.minesweeper.CellStatus",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk",
"line_number": 98,
"usage_type": "call"
}
] |
22837983090
|
import pandas as pd
import networkx as nx
# def splitDataFrameList(df,target_column,separator):
# ''' df = dataframe to split,
# target_column = the column containing the values to split
# separator = the symbol used to perform the split
# returns: a dataframe with each entry for the target column separated, with each element moved into a new row.
# The values in the other columns are duplicated across the newly divided rows.
# '''
# def splitListToRows(row,row_accumulator,target_column,separator):
# split_row = row[target_column].split(separator)
# for s in split_row:
# new_row = row.to_dict()
# new_row[target_column] = s
# row_accumulator.append(new_row)
# new_rows = []
# df.apply(splitListToRows,axis=1,args = (new_rows,target_column,separator))
# new_df = pd.DataFrame(new_rows)
# return new_df
# df = pd.read_csv('../reading_and_cleaning/cleaned_podcasts.csv', sep='\t', index_col=0)
# df = df.replace(r'', np.nan, regex=True)
# df = df[pd.notnull(df['guests'])]
# split_hosts = splitDataFrameList(df, 'hosts', ', ')
#G1 = nx.from_pandas_dataframe(split_hosts, 'guests', 'hosts', edge_attr=['date', 'duration', 'podcast'], create_using=nx.MultiDiGraph())
# for index, row in split_hosts.iterrows():
# if(row['hosts'] == row['guests']):
# split_hosts.drop(index=index, inplace=True)
# guest_durations = split_hosts.groupby(['hosts', 'guests'])['duration'].sum()
# guest_durations = guest_durations.reset_index()
guest_durations = pd.read_csv('../reading_and_cleaning/guest_durations.csv', sep='\t', index_col=0)
G1 = nx.from_pandas_dataframe(guest_durations, 'guests', 'hosts', edge_attr=['duration'], create_using=nx.DiGraph())
G2 = nx.from_pandas_dataframe(guest_durations, 'guests', 'hosts', edge_attr=['duration'], create_using=nx.Graph())
##################################################################################################################################
remove = [node for node,degree in G2.degree().items() if degree < 3]
G2.remove_nodes_from(remove)
#print(nx.number_of_nodes(G2))
remove = [node for node,degree in G1.degree().items() if degree < 3]
G1.remove_nodes_from(remove)
#print(nx.number_of_nodes(G1))
##################################################################################################################################
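# Node-level metrics: PageRank (duration-weighted) and HITS on the directed graph; eccentricity and centrality measures on the undirected graph.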
pr = nx.pagerank(G1, weight='duration')
hubs, authorities = nx.hits(G1)
nodes_df = pd.DataFrame.from_dict(pr, orient='index')
nodes_df.rename(columns = {0:'pr'}, inplace = True)
nodes_df['hub'] = hubs.values()
nodes_df['auth'] = authorities.values()
#print(len(nodes_df), len(nx.eccentricity(G2).values()))
nodes_df['eccentricity'] = nx.eccentricity(G2).values()
nodes_df['closeness'] = nx.closeness_centrality(G2).values()
nodes_df['betweenness'] = nx.betweenness_centrality(G2).values()
nodes_df['degree_cen'] = nx.degree_centrality(G2).values()
nodes_df['eigen'] = nx.eigenvector_centrality(G2).values()
nodes_df.to_csv('node_values.csv', sep='\t')
|
brooksjaredc/podcast_network_analysis
|
analyzing_functions/set_node_attr.py
|
set_node_attr.py
|
py
| 2,988 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "networkx.from_pandas_dataframe",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "networkx.from_pandas_dataframe",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "networkx.pagerank",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "networkx.hits",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_dict",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "networkx.eccentricity",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "networkx.closeness_centrality",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "networkx.betweenness_centrality",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "networkx.degree_centrality",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "networkx.eigenvector_centrality",
"line_number": 72,
"usage_type": "call"
}
] |
35226848142
|
import torch
import torch.nn.functional
from .calculate_ssim import ssim
from .utils import fspecial_gauss
def ms_ssim(image1: torch.Tensor, image2: torch.Tensor, filter_weight: torch.Tensor) -> float:
""" Multi scale structural similarity
Args:
        image1 (torch.Tensor): Original image tensor.
        image2 (torch.Tensor): Target image tensor.
filter_weight (torch.Tensor): Gaussian filter weight.
Returns:
MS_SSIM value.
"""
assert image1.shape == image2.shape
ssim_value = ssim(image1, image2, filter_weight=filter_weight)
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(image1.device, dtype=image2.dtype)
mcs = []
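    # At each scale, record the contrast-sensitivity (cs) term, then downsample both images by a factor of 2.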
for _ in range(weights.shape[0]):
_, cs_value = ssim(image1, image2, filter_weight=filter_weight, cs=True)
mcs.append(cs_value)
padding = (image1.shape[2] % 2, image2.shape[3] % 2)
image1 = torch.nn.functional.avg_pool2d(image1, kernel_size=2, padding=padding)
image2 = torch.nn.functional.avg_pool2d(image2, kernel_size=2, padding=padding)
mcs = torch.stack(mcs, dim=0)
out = torch.prod((mcs[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_value ** weights[-1]), dim=0)
return out
class MS_SSIM(torch.nn.Module):
def __init__(self) -> None:
super(MS_SSIM, self).__init__()
self.filter_weight = fspecial_gauss(11, 1.5)
def forward(self, image1_tensor: torch.Tensor, image2_tensor: torch.Tensor) -> torch.Tensor:
"""
Args:
image1_tensor (torch.Tensor): Original tensor picture.
image2_tensor (torch.Tensor): Target tensor picture.
Returns:
torch.Tensor.
"""
assert image1_tensor.shape == image2_tensor.shape
out = torch.mean(ms_ssim(image1_tensor, image2_tensor, filter_weight=self.filter_weight))
return out
|
avacaondata/SpainAI_Hackaton_ComputerVision
|
ESRGAN-PyTorch/esrgan_pytorch/utils/image_quality_assessment/calculate_mssim.py
|
calculate_mssim.py
|
py
| 1,882 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.Tensor",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "calculate_ssim.ssim",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "calculate_ssim.ssim",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.avg_pool2d",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "torch.stack",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.prod",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "utils.fspecial_gauss",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "torch.mean",
"line_number": 52,
"usage_type": "call"
}
] |
74077927228
|
from PyQt5.QtCore import QFile, QTextStream, QIODevice
class StyleLoader:
def __init__(self, variables_path: str = None):
self._variables = {}
self._stylesheets = {}
self._init_variables(variables_path)
def get_merged_stylesheets(self, names: list):
return self._merge_stylesheets([self.get_stylesheet(name=name) for name in names])
def get_stylesheet(self, name: str) -> str:
stylesheet = self._stylesheets.get(name)
if stylesheet is None:
stylesheet = self._create_stylesheet(name)
self._stylesheets[name] = stylesheet
return stylesheet
def _merge_stylesheets(self, stylesheets: list) -> str:
return "\n".join(stylesheets)
def _create_stylesheet(self, path: str) -> str:
stylesheet = self._load_unmapped_stylesheet(path)
return self._map_stylesheet(stylesheet)
def _load_unmapped_stylesheet(self, path: str) -> str:
file = QFile(path)
if not file.open(QIODevice.ReadOnly | QIODevice.Text):
return ""
content = file.readAll().data().decode("utf-8")
file.close()
return content
def _map_stylesheet(self, stylesheet: str) -> str:
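        # Substitute every @variable placeholder in the stylesheet with its value from the variables file.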
for variable_name, variable_value in self._variables.items():
stylesheet = stylesheet.replace(variable_name, variable_value)
return stylesheet
def _init_variables(self, path: str) -> None:
if path is None:
return
file = QFile(path)
if not file.open(QIODevice.ReadOnly | QIODevice.Text):
return
stream = QTextStream(file)
while not stream.atEnd():
line = stream.readLine().strip().replace(" ", "")
if line.startswith("@"):
variable_name, variable_value = line.split("=", 1)
self._variables[variable_name] = variable_value
file.close()
|
lennertsoffers/KeyCursor
|
key_cursor_config/model/StyleLoader.py
|
StyleLoader.py
|
py
| 1,926 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtCore.QFile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QIODevice.ReadOnly",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QIODevice",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QIODevice.Text",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QFile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QIODevice.ReadOnly",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QIODevice",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QIODevice.Text",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QTextStream",
"line_number": 54,
"usage_type": "call"
}
] |
22781339759
|
import IMP
import IMP.pmi
import IMP.pmi.macros
import IMP.pmi.analysis
import RMF
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import numpy as np
import argparse
#########
# PARSER
#########
p = argparse.ArgumentParser(
description="Align selected RMF files. \n"
"Example of usage: align_rmf.py -d mc_tags -cl 2 -st 0"
)
p.add_argument('-d', action="store", dest="dir_name",
help="directory name to process")
p.add_argument('-cl', action="store", dest="cluster",
help="Specify cluster")
p.add_argument('-st', action="store", dest="state",
help="Specify RMF state")
parsero = p.parse_args()
def get_coordinates_alignment(hier, selection=None):
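    # Collect particle XYZ coordinates, keyed by molecule name (or by the keys of the given selection), for use in alignment.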
coord_dict = {}
if selection:
for k, v in selection.items():
sel = IMP.atom.Selection(hier,
molecule=v[0],
residue_indexes=np.arange(v[1], v[2], 1),
resolution=IMP.atom.ALL_RESOLUTIONS,
copy_index=v[3]).get_selected_particles()
coords = [np.array(IMP.core.XYZ(p).get_coordinates())
for p in sel]
coord_dict[k] = coords
else:
mols = IMP.pmi.tools.get_molecules(hier)
# print(mols)
for m in mols:
sel = IMP.atom.Selection(hier,
molecule=m.get_name(),
copy_index=IMP.atom.Copy(m).get_copy_index(),
resolution=IMP.atom.ALL_RESOLUTIONS).get_selected_particles()
coords = [np.array(IMP.core.XYZ(p).get_coordinates())
for p in sel]
coord_dict[m.get_name()] = coords
return coord_dict
def transform_coordinates(hier, transformation):
# Transform all coordinates
rbs, beads = IMP.pmi.tools.get_rbs_and_beads(hier)
for rb in rbs:
IMP.core.transform(rb, transformation)
for p in beads:
temp_coord = IMP.core.XYZ(p)
IMP.core.transform(temp_coord, transformation)
def get_reference_coordinates(rmf_in, selection=None):
"""
Get reference coordinates in reference rmf file
:param rmf_in: reference rmf file
:return: coordinates
"""
m = IMP.Model()
f = RMF.open_rmf_file_read_only(rmf_in)
hier = IMP.rmf.create_hierarchies(f, m)[0]
IMP.rmf.load_frame(f, RMF.FrameID(0))
# Get coordinates from frame 1
ref_coord = get_coordinates_alignment(hier, selection)
del m, f
return ref_coord
def align_rmf(rmf_in, rmf_out, ref_coord, selection=None, frames=None):
"""
Align selected frames in rmf_in to ref_coordinates and
calculate RMSD.
:param rmf_in: input rmf
:param rmf_out: output rmf
:param selection: selection of particles
:param ref_coord: reference coordinates after running Sampcon.py
:param frames: passing selected frames
:return:
"""
fh_out = RMF.create_rmf_file(rmf_out)
m = IMP.Model()
f = RMF.open_rmf_file_read_only(rmf_in)
print('Number of frames', f.get_number_of_frames())
if not frames:
frames = np.arange(0, f.get_number_of_frames(), 100)
hier = IMP.rmf.create_hierarchies(f, m)[0]
states = IMP.atom.get_by_type(hier, IMP.atom.STATE_TYPE)
for i, s in enumerate(states):
if i == sel_state:
p = IMP.Particle(m, 'System')
hier_temp = IMP.atom.Hierarchy.setup_particle(p)
hier_temp.add_child(s)
IMP.rmf.add_hierarchy(fh_out, hier_temp)
RMSD = []
for i in frames:
if i % 100 == 0: print('Frame:', i)
IMP.rmf.load_frame(f, RMF.FrameID(i))
temp_coord = get_coordinates_alignment(hier, selection)
ali = IMP.pmi.analysis.Alignment(ref_coord, temp_coord)
(rmsd, transformation) = ali.align()
RMSD.append(rmsd)
transform_coordinates(hier, transformation)
IMP.rmf.save_frame(fh_out, str(i))
del temp_coord
del f
print('Mean RMSD:', np.mean(np.array(RMSD)))
return RMSD
if __name__ == '__main__':
#####################
# MAIN
#####################
# IO files
dir_name = parsero.dir_name # mc_tags_k1
    cl = parsero.cluster  # 2
    st = parsero.state  # 0
sel_state = 0
ref_rmf = f'../output/{dir_name}/analysis/rmsd/cluster.{cl}/cluster_center_model.rmf3'
rmf_in_A = f'../output/{dir_name}/analysis/A_models_clust{cl}_{st}.rmf3'
rmf_out_A = f'../output/{dir_name}/analysis/A_models_clust{cl}_{st}_aligned.rmf3'
rmf_in_B = f'../output/{dir_name}/analysis/B_models_clust{cl}_{st}.rmf3'
rmf_out_B = f'../output/{dir_name}/analysis/B_models_clust{cl}_{st}_aligned.rmf3'
cluster_frames_A = f'../output/{dir_name}/analysis/rmsd/cluster.{cl}.sample_A.txt'
cluster_frames_B = f'../output/{dir_name}/analysis/rmsd/cluster.{cl}.sample_B.txt'
m = IMP.Model()
f = RMF.open_rmf_file_read_only(rmf_in_A)
nframes_A = f.get_number_of_frames()
frames_A = [int(l.strip()) for l in open(cluster_frames_A, 'r')]
frames_B = [int(l.strip()) - nframes_A for l in open(cluster_frames_B, 'r')]
del m, f
#################################
# Get reference and align
#################################
reference_coordinates = get_reference_coordinates(ref_rmf)
rmsd_A = align_rmf(rmf_in_A, rmf_out_A, reference_coordinates, frames=frames_A)
rmsd_B = align_rmf(rmf_in_B, rmf_out_B, reference_coordinates, frames=frames_B)
#################################
# Plot RMSD distribution
#################################
sns.set(font_scale=3)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(30, 40))
ax1.set_title(f'RMSD_A', size=50, y=1.15, fontweight='bold')
sns.histplot(x=rmsd_A, stat='density', fill=True, ax=ax1)
ax2.set_title(f'RMSD_B', size=50, y=1.15, fontweight='bold')
sns.histplot(x=rmsd_B, stat='density', fill=True, ax=ax2)
plt.tight_layout(pad=3.0)
# plt.show()
plt.savefig(f'../output/{dir_name}/analysis/pict_tags_rmsd.png')
print('\nDONE!\n')
sys.exit(0)
|
Altairch95/ExocystDYN
|
scripts/align_rmf.py
|
align_rmf.py
|
py
| 5,528 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.get_molecules",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Selection",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.Copy",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "IMP.pmi.tools.get_rbs_and_beads",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.XYZ",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "IMP.core.transform",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "IMP.core",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "IMP.Model",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "RMF.open_rmf_file_read_only",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "IMP.rmf.create_hierarchies",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "IMP.rmf.load_frame",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "RMF.FrameID",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "RMF.create_rmf_file",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "IMP.Model",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "RMF.open_rmf_file_read_only",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "IMP.rmf.create_hierarchies",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "IMP.atom.get_by_type",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "IMP.Particle",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "IMP.atom.Hierarchy.setup_particle",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "IMP.atom",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "IMP.rmf.add_hierarchy",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "IMP.rmf.load_frame",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "RMF.FrameID",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "IMP.pmi.analysis.Alignment",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "IMP.pmi",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "IMP.rmf.save_frame",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "IMP.rmf",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "IMP.Model",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "RMF.open_rmf_file_read_only",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "seaborn.histplot",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "seaborn.histplot",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 187,
"usage_type": "call"
}
] |
32562155548
|
from django.conf.urls.defaults import *
from django.conf import settings
from gallery.feeds import Photos, Videos, Tags, TagContents, Comments
try:
import django_openidconsumer
except ImportError:
django_openidconsumer = None
feeds = {
'comments': Comments,
'photos': Photos,
'videos': Videos,
'tags': Tags,
'tag': TagContents,
}
urls = [
(r'^$', 'gallery.views.index'),
(r'^feeds/(?P<url>.*)/$',
'django.contrib.syndication.views.feed', {'feed_dict': feeds}),
(r'^tag/(?P<tag_name>[\w\+\-]*)/$', 'gallery.views.medias_in_tag'),
(r'^tag/(?P<tag_name>[\w\+\-]*)/(?P<page>\d+)/$',
'gallery.views.medias_in_tag'),
(r'^event/(?P<media_type>\w+)/(?P<media_id>\d+)/(?P<event_id>\d+)/$',
'gallery.views.medias_in_event'),
(r'^photo/(?P<photo_id>\d+)/(?P<tag_name>[\w\-]*)/$',
'gallery.views.medias_in_tag'),
(r'^photo/(?P<photo_id>\d+)/$', 'gallery.views.photo'),
(r'^video/(?P<video_id>\d+)/(?P<tag_name>[\w\-]*)/$',
'gallery.views.medias_in_tag'),
(r'^date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$',
'gallery.views.date'),
(r'^date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<page>\d+)/$',
'gallery.views.date'),
(r'^recent/$', 'gallery.views.recent'),
(r'^recent/(?P<tag_name>[\w\+\-]*)/$', 'gallery.views.recent'),
(r'^recent/(?P<tag_name>[\w\+\-]*)/(?P<page>\d+)/$', 'gallery.views.recent'),
(r'^events/$', 'gallery.views.events'),
(r'^event/(?P<event_id>\d+)/$', 'gallery.views.event'),
(r'^slideshow/(?P<tag_name>[\w\+\-]*)/(?P<photo_id>\d+)/$', 'gallery.views.slideshow'),
]
if django_openidconsumer:
urls.extend([
#(r'^comment/(?P<comment_id>\d+)/$', 'gallery.views.comment'),
(r'^openid/$', 'django_openidconsumer.views.begin', {'sreg': 'fullname'}),
(r'^openid/complete/$', 'django_openidconsumer.views.complete'),
(r'^openid/signout/$', 'django_openidconsumer.views.signout'),
(r'^status/cache/$', 'gallery.memcached_status.cache_status'),
])
media_path = settings.GALLERY_SETTINGS.get('media_path')
static_path = settings.GALLERY_SETTINGS.get('static_path')
if media_path:
urls.append((r'^media/(.*)$',
'django.views.static.serve', {'document_root': media_path}))
if static_path:
urls.append((r'^static/(.*)$',
'django.views.static.serve', {'document_root': static_path}))
urlpatterns = patterns('', *urls)
|
ginking/Gallery-1
|
urls.py
|
urls.py
|
py
| 2,422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gallery.feeds.Comments",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "gallery.feeds.Photos",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "gallery.feeds.Videos",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "gallery.feeds.Tags",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "gallery.feeds.TagContents",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.GALLERY_SETTINGS.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.GALLERY_SETTINGS",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.GALLERY_SETTINGS.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.GALLERY_SETTINGS",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 54,
"usage_type": "name"
}
] |
32463868402
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchvision
import torchvision.transforms as transforms
class DataLoader:
def __init__(self, batch_size = 4):
'''
        num_workers should be 0 in a Windows environment (otherwise a pipe error occurs)
'''
print("---- downloading dataset from online... ----")
trainset, testset = self.download_data()
self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=0)
self.testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=0)
def download_data(self):
'''
download CIFAR-10 data which can be replaced by MNIST or etc later on.
'''
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
return trainset, testset
def show_image(self):
'''
Just for the testing.
'''
# get some random training images
dataiter = iter(self.trainloader)
        images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
|
jindeok/XAI_torch_captum
|
XAI_torch/utils.py
|
utils.py
|
py
| 2,051 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torchvision.utils",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.transpose",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
}
] |
8915382368
|
from datetime import datetime
from astral import Astral
from pyHS100 import Discover, SmartPlug, SmartBulb
import time, socket, requests, pytz, simplejson
#Used to determine daylight status, occupancy status, and does the things
#if the things are needed based on prior info
class SmartSwitchControl():
a = Astral()
city = a['Toronto']
count = 0
def run(self):
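        # If it is currently dark (after dusk or before dawn), fire the IFTTT webhook to switch the lights on.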
global count
global lightson
now = datetime.now(pytz.utc)
        sun = self.city.sun(date = now, local = True)
time.sleep(0.5)
if now >= sun['dusk'] or now <= sun['dawn']:
requests.post("https://maker.ifttt.com/trigger/motion/with/key/ctOeqYQKH00WbPhjj-fCRyio_MW6GdmEQ2as2h5bQvI")
            lightson = True
#print("Lights on")
elif now >= sun['dawn']:
#print("It's not dark yet")
pass
#Creates JSON syntaxed representation of current smart device info and status
def updateStatus(self):
devices = []
deviceCount = 0
try:
for dev in Discover.discover().values():
ipBreak = str(dev).split(' (')[0]
ip = ipBreak.split('at ')[1]
idBreak = str(dev).split('(')[1]
ID = idBreak.split(')')[0]
statusBreak = str(dev).split('is_on: ')[1]
status = statusBreak.split(' - ')[0]
if status == "True":
status = "on"
if status == "False":
status = "off"
entry = {'id': "switch"+str(deviceCount),
'name': ID,
'is_on': status,
'ip': ip
}
devices.append(entry)
deviceCount += 1
return devices
except Exception as e:
print("Error in device detection...resetting", e)
pass
|
bradyjibanez/Voyager
|
occupantInference/smartSwitchControl.py
|
smartSwitchControl.py
|
py
| 2,417 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "astral.Astral",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pytz.utc",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pyHS100.Discover.discover",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pyHS100.Discover",
"line_number": 32,
"usage_type": "name"
}
] |
11735575748
|
"""Add File table
Revision ID: 3822d04489a0
Revises:
Create Date: 2021-06-26 16:18:52.167545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3822d04489a0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('filepath', sa.String(), nullable=False),
sa.Column('storage', sa.String(), nullable=False),
sa.Column('primary', sa.Boolean(), nullable=False),
sa.Column('has_thumbnail', sa.Boolean(), nullable=False),
sa.Column('category', sa.Enum('unknown', 'other', 'image', 'photo', 'scan', 'dump', 'dump_metadata', 'text', 'prose', 'transcription', 'collection', name='filecategory'), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.Column('comment', sa.Text(), nullable=True),
sa.Column('analyzed', sa.DateTime(), nullable=True),
sa.Column('upload_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('asset_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['asset_id'], ['assets.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('files')
# ### end Alembic commands ###
|
retroherna/rhinventory
|
alembic/versions/3822d04489a0_add_file_table.py
|
3822d04489a0_add_file_table.py
|
py
| 1,531 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Enum",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKeyConstraint",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.PrimaryKeyConstraint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "alembic.op.drop_table",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 43,
"usage_type": "name"
}
] |
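A minimal sketch of applying the 3822d04489a0 revision above programmatically; the "alembic.ini" filename and the database it points at are assumptions, and the usual route is simply running "alembic upgrade head" from the shell:
from alembic.config import Config
from alembic import command
cfg = Config("alembic.ini")             # assumed project config pointing at the target database
command.upgrade(cfg, "3822d04489a0")    # creates the files table defined in upgrade()
# command.downgrade(cfg, "base")        # would run downgrade() and drop it again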
16106180145
|
#!/usr/bin/env python3
if __name__ == "__main__":
import argparse
import os
import benj
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", dest="h5ad", required=True)
ap.add_argument("-o", "--output", required=True)
ap.add_argument("--labels", required=True, nargs="+")
ap.add_argument("-b", "--batch", required=True)
ap.add_argument("--hvg", type=int, default=5000)
ap.add_argument("--compression", type=int, default=6)
ap.add_argument("--with-mean", dest="with_mean", action="store_true")
ap.add_argument("--without-mean", dest="with_mean", action="store_false")
ap.set_defaults(with_mean=False)
args = benj.parse_args(ap, ["log", "scanpy", "anndata"])
if "subset" not in args or args["subset"] is None:
args["subset"] = []
sw = benj.stopwatch()
with sw("Reading H5AD"):
adata = benj.parse_anndata(**args)
adata = benj.integrate_rna(adata,
batch=args["batch"], hvg=args["hvg"], use_scaling=True, use_harmony=True, use_bbknn=False, use_rgg=False, plot=args["labels"], target_sum=1e4,
output=args["output"], compression=args["compression"])
for label in args["labels"]:
    with sw("Training celltypist for " + label):
        import celltypist
        import scanpy as sc
        ct = celltypist.train(adata.raw.to_adata(), labels=adata.obs.loc[:, label], genes=adata.var_names, n_jobs=-1, with_mean=args["with_mean"])
        ct.write(os.path.join(sc.settings.figdir, f"celltypist_{label}.pkl"))
|
KellisLab/benj
|
scripts/integrate_and_train.py
|
integrate_and_train.py
|
py
| 1,676 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "benj.parse_args",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "benj.stopwatch",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "benj.parse_anndata",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "benj.integrate_rna",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "celltypist.train",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "scanpy.settings",
"line_number": 30,
"usage_type": "attribute"
}
] |
24604094520
|
# Packages
import time
import selenium
from selenium import webdriver
import NameExtractor
app_names = []
element_web = []
k = 0
count = 1
def decompiler(path, file_path):
global app_names, driver
driver = webdriver.Chrome(path)
driver.maximize_window()
app_names = NameExtractor.name_extractor(file_path)
for i in app_names:
try:
driver.refresh()
driver.get("https://www.apkdecompilers.com/")
time.sleep(5)
# find element
# extra code
print(app_names)
driver.get("https://www.apkdecompilers.com/")
driver.find_element_by_id('apk')
time.sleep(10)
# send element
element_send = driver.find_element_by_id("apk")
element_send.send_keys(i)
print(i)
driver.find_element_by_id("submit-btn").click()
time.sleep(270)
# download element
driver.find_element_by_xpath("/html/body/section[1]/div/div/div/div/div/div/div/div[2]/div/div/div/div/div/div/div[2]/a/b/u/h3").click()
time.sleep(50)
except:
# find element
# extra code
driver.refresh()
driver.get("http://www.javadecompilers.com/apk")
# send element
element_send = driver.find_element_by_id("upload_datafile")
element_send.send_keys(i)
print(i)
send_element = driver.find_element_by_xpath("/html/body/div[2]/div/div[2]/div/div[2]/div/form/div/div/div/div[2]/div[1]/div/button")
webdriver.ActionChains(driver).move_to_element(send_element).click(send_element).perform()
time.sleep(270)
# download element
down_element = driver.find_element_by_xpath("/html/body/div[2]/div/div[2]/div/div[2]/div/div[2]/div/div[1]/div/div[2]/div/div[1]/div[2]/a")
webdriver.ActionChains(driver).move_to_element(down_element).click(down_element).perform()
time.sleep(50)
driver.close()
driver_path = "C:\Program Files (x86)\Python38-32\Chrome Driver\chromedriver.exe"
file_path = "D:\Project\Privacy Detection\Apps\App Data.xlsx"
decompiler(driver_path,file_path)
|
Neilnarnaware/Privacy-Detection-of-Android-Application
|
Decompiler.py
|
Decompiler.py
|
py
| 2,296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "NameExtractor.name_extractor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 56,
"usage_type": "call"
}
] |
32509221733
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sc
import csv
def myAnova(Matrice, pvalue_crit):
# Initialisation :
H = 0
F = 0
var_intra = 0
var_inter = 0
obs_moy = 0
eff_tot = 0
# Moyenne des classes :
for i in range(len(Matrice)):
obs_moy = sum(Matrice[i]) + obs_moy
eff_tot = len(Matrice[i]) + eff_tot
# print("Moyenne de la classe", i, "=", round(obs_moy/eff_tot,1))
# print("Effectif total =", eff_tot, "\n")
obs_moy = obs_moy / eff_tot
# print("Moyenne de la moyenne des classes =", round(obs_moy,1))
# print("Effectif Total =", eff_tot)
# Variance intra (= moyenne des classes) :
sum_var_intra = 0
for j in range(0, len(Matrice)):
for i in range(0, len(Matrice[j])):
sum_var_intra = sum_var_intra + (Matrice[j][i] - np.mean(Matrice[j])) ** 2
var_intra = (1 / eff_tot) * sum_var_intra
# print("Variance Intra = ", round(var_intra, 3))
# Variance inter (= moyenne des observations) :
sum_var_inter = 0
for j in range(0, len(Matrice)):
sum_var_inter = sum_var_inter + (len(Matrice[j]) * (np.mean(Matrice[j]) - obs_moy) ** 2)
var_inter = (1 / eff_tot) * sum_var_inter
# print("Variance Inter = ", round(var_inter, 3))
# Valeur de la stat F :
# var_tot = var_intra + var_inter
F = (var_inter / (len(Matrice) - 1)) / (var_intra / (eff_tot - len(Matrice)))
# print("Variance Totale = ", round(var_tot, 3))
# print("Satistique F = ", round(F, 3))
# Hypothèse H (=0 ou 1) :
p_value = sc.f.cdf(F, len(Matrice) - 1, eff_tot - len(Matrice))
# print("pValue = ", round(p_value, 4))
if (p_value > 1 - pvalue_crit):
H = False
# print("H =", H, "--> Donc on rejette H0.\n")
else:
H = True
# print("H =", H, "--> Donc on valide H0.\n")
return (H, F, var_intra, var_inter)
def readDat_csv(NomDuFichierCSV, nbline, nbcol):
# Auteur P. Maurine
# Date : 13/12/2019
# Prend le fichier csv NomDuFichierCSV de n lignes p colonnes et retourne
# une matrice de nxp floats
L = []
Sample = np.array([], dtype=float)
with open(NomDuFichierCSV, newline='') as f:
read = csv.reader(f, delimiter=";")
for row in read:
L.extend(row)
Sample = [float(i) for i in L]
Sample = np.reshape(Sample, [nbline, nbcol])
return (Sample)
def Temp_Dept(Sample, Departement):
Temp = np.zeros(len(Sample[1]) - 1)
indice = 0
for i in range(1, len(Sample[1])):
Temp[indice] = Sample[np.where(Sample[:, 0] == Departement), i]
indice += 1
return (Temp)
def Temp_An(Sample, Annee):
Temp = np.zeros((len(Sample) - 1))
indice = 0
for i in range(1, len(Sample)):
Temp[indice] = Sample[i, np.where(Sample[0, :] == Annee)]
indice += 1
return (Temp)
matCSV = readDat_csv("DonneesMeteoFrance.csv", 95, 47)
Temp_Fr_2000 = Temp_An(matCSV, 2000)
# print("Températures enregistrées en France lors de l'an 2000 :\n", Temp_Fr_2000, "\n")
Temp_Fr_2005 = Temp_An(matCSV, 2005)
# print("Températures enregistrées en France lors de l'an 2005 :\n", Temp_Fr_2005, "\n")
Temp_Fr_2010 = Temp_An(matCSV, 2010)
# print("Températures enregistrées en France lors de l'an 2010 :\n", Temp_Fr_2010, "\n")
Mat_An_1 = np.zeros((3, len(Temp_Fr_2000)))
for i in range(0, len(Temp_Fr_2000)):
Mat_An_1[0][i] = Temp_Fr_2000[i]
Mat_An_1[1][i] = Temp_Fr_2005[i]
Mat_An_1[2][i] = Temp_Fr_2010[i]
# print("Matrice pour 2000, 2005 et 2010 :\n", Mat_An_1, "\n")
H_An1, F_An1, var_intra_An1, var_inter_An1 = myAnova(Mat_An_1, 0.05)
print("Utilisation de la fonction myAnova (Année 2000, 2005 et 2010) :")
print("--> Hypothèse H =", H_An1, "(On rejette H0)")
print("--> Statistique F =", round(F_An1, 2))
print("--> Variance Intra =", round(var_intra_An1, 2))
print("--> Variance Inter =", round(var_inter_An1, 2), "\n")
# Températures en Frane entre 1970, 1975 et 1980 : ----------------------------
# Extraction des données :
Temp_Fr_1970 = Temp_An(matCSV, 1970)
# print("Températures enregistrées en France lors de l'an 1970 :\n", Temp_Fr_1970, "\n")
Temp_Fr_1975 = Temp_An(matCSV, 1975)
# print("Températures enregistrées en France lors de l'an 1975 :\n", Temp_Fr_1975, "\n")
Temp_Fr_1980 = Temp_An(matCSV, 1980)
# print("Températures enregistrées en France lors de l'an 1980 :\n", Temp_Fr_1980, "\n")
Mat_An_2 = np.zeros((3, len(Temp_Fr_2000)))
for i in range(0, len(Temp_Fr_2000)):
Mat_An_2[0][i] = Temp_Fr_1970[i]
Mat_An_2[1][i] = Temp_Fr_1975[i]
Mat_An_2[2][i] = Temp_Fr_1980[i]
# print("Matrice pour 1970, 1975 et 1980 :\n", Mat_An_2, "\n")
H_An2, F_An2, var_intra_An2, var_inter_An2 = myAnova(Mat_An_2, 0.05)
print("Utilisation de la fonction myAnova (Année 1970, 1975 et 1980) :")
print("--> Hypothèse H =", H_An2, "(On rejette H0)")
print("--> Statistique F =", round(F_An2, 2))
print("--> Variance Intra =", round(var_intra_An2, 2))
print("--> Variance Inter =", round(var_inter_An2, 2), "\n")
|
Varelafv/TD6.py
|
TD2-EXO2.py
|
TD2-EXO2.py
|
py
| 5,319 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.mean",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scipy.stats.f.cdf",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.stats.f",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "scipy.stats",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 125,
"usage_type": "call"
}
] |
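A quick worked call of the myAnova function from TD2-EXO2.py above, on made-up groups rather than the Meteo France data; it assumes the file's numpy/scipy imports are in scope and returns the decision flag, the F statistic and the two variance components:
groups = [[10.1, 10.4, 9.8, 10.0],
          [12.3, 12.1, 12.6, 12.0],
          [11.0, 10.9, 11.2, 11.1]]
H, F, v_intra, v_inter = myAnova(groups, 0.05)
print(H, round(F, 2), round(v_intra, 3), round(v_inter, 3))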
45254772596
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 20 23 20:40:00
@author: kirsh012
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import joblib
from sklearn.model_selection import PredefinedSplit, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.utils import class_weight
### Load custom modules
import nn_models as nnm
import dataprocessing as dp
import visualization as viz
### Load the data for comparison
data, varnames, target = dp.load_data_nn('1-sf', sensor='both', dlh=0, keep_SH=False, return_target=True)
# Split data into time oriented chunks
train_idx, test_idx, val_idx = nnm.split_data_cv_indx(data,target)
train_labels = target[train_idx]
test_labels = target[test_idx]
val_labels = target[val_idx]
print("Train labels shape: ", train_labels.shape)
print("Test labels shape: ", test_labels.shape)
print("Val labels shape: ", val_labels.shape)
train_data = data[train_idx,:]
test_data = data[test_idx,:]
val_data = data[val_idx,:]
print("Train shape: ", train_data.shape)
print("Test shape: ", test_data.shape)
print("Val shape: ", val_data.shape)
# Use indices to make PredefinedSplit for hyperparameter optimization
train_idx = np.full( (train_data.shape[0],) , -1, dtype=int)
val_idx = np.full( (val_data.shape[0], ) , 0, dtype=int)
test_fold = np.append(train_idx, val_idx)
print(test_fold.shape)
ps = PredefinedSplit(test_fold)
print(ps)
combined_train_data = np.vstack((train_data, val_data))
combined_train_labels = np.vstack((train_labels.reshape(-1,1), val_labels.reshape(-1,1))).ravel()
print("Combined train data shape: ", combined_train_data.shape)
print("Combined labels shape:", combined_train_labels.shape)
param_grid = {
'n_estimators': [100, 200, 500, 1000, 2000, 5000],
'max_depth': [5, 10, 15, 20, 25, None]
}
# Compute the class weights
train_weights = class_weight.compute_class_weight(class_weight='balanced',
classes=np.unique(combined_train_labels), y=combined_train_labels)
train_weights = {i: weight for i, weight in enumerate(train_weights)}
from pathlib import Path
filename = Path("tuned_rf_model_weighted.pkl")
### Save the model
if not filename.exists():
print("Running the hyperparameter testing model...")
clf = GridSearchCV(RandomForestClassifier(class_weight=train_weights), param_grid=param_grid, scoring='roc_auc_ovr_weighted', cv = ps, verbose=3)
clf.fit(combined_train_data, combined_train_labels)
joblib.dump(clf, filename)
else:
clf = joblib.load(filename)
# Print hyperparameter results
print("Report: \n", pd.DataFrame(clf.cv_results_))
print("Best inner loop score: ", clf.best_score_)
print("Best parameters: ", clf.best_params_)
# Predict on the anomalous
train_preds = clf.predict_proba(train_data)
test_preds = clf.predict_proba(test_data)
val_preds = clf.predict_proba(val_data)
### Visualize Performance
# Return AU-ROC
fpr_test, tpr_test, thresh_test = roc_curve(test_labels, test_preds[:,1])
fpr_train, tpr_train, thresh_train = roc_curve(train_labels, train_preds[:,1])
fpr_val, tpr_val, thresh_val = roc_curve(val_labels, val_preds[:,1])
# Return AU-PRC
ppr_test, rec_test, pthresh_test = precision_recall_curve(test_labels, test_preds[:,1])
ppr_train, rec_train, pthresh_train = precision_recall_curve(train_labels, train_preds[:,1])
ppr_val, rec_val, pthresh_val = precision_recall_curve(val_labels, val_preds[:,1])
viz.plot_roc_curve(tpr_train, fpr_train, tpr_val, fpr_val, tpr_test, fpr_test, title = "RandomForest AU-ROC")
viz.plot_prc_curve(rec_train, ppr_train, rec_val, ppr_val, rec_test, ppr_test, title = "RandomForest AU-PRC")
plt.show()
|
tk27182/masters-thesis
|
Code/run_test_randomforest.py
|
run_test_randomforest.py
|
py
| 3,799 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dataprocessing.load_data_nn",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nn_models.split_data_cv_indx",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.PredefinedSplit",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.class_weight.compute_class_weight",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.class_weight",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.unique",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "joblib.dump",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_recall_curve",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "visualization.plot_roc_curve",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "visualization.plot_prc_curve",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
}
] |
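A tiny illustration of the PredefinedSplit convention used in run_test_randomforest.py above (toy array, not the thesis data): entries marked -1 only ever appear on the training side, while entries marked 0 form the single validation fold that GridSearchCV scores against:
import numpy as np
from sklearn.model_selection import PredefinedSplit
toy_fold = np.array([-1, -1, -1, 0, 0])   # -1 = always train, 0 = the one validation fold
for tr_idx, va_idx in PredefinedSplit(toy_fold).split():
    print(tr_idx, va_idx)   # -> [0 1 2] [3 4]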
25145519000
|
from dataclasses import dataclass
from typing import Any
from msg.serializers import BaseExpenseCreationSerializer, BaseExpensePropertySerializer
@dataclass
class ExpenseCreationHelper:
data: dict
def __call__(self, *args: Any, **kwds: Any) -> Any:
if not self._parse_data():
return
return True
def _parse_data(self):
user_id = self.data.pop('user_id', None)
expense_category_name = self.data.pop('expense_category_name', None)
expense_ser = BaseExpenseCreationSerializer(data={
'user_id': user_id,
'expense_category_name': expense_category_name
})
expense_ser.is_valid(raise_exception=True)
expense_inst = expense_ser.save()
self.data['expense_id'] = expense_inst.id
expense_property_ser = BaseExpensePropertySerializer(data=self.data)
expense_property_ser.is_valid(raise_exception=True)
expense_property_ser.save()
return True
|
enamsaraev/tg_api
|
msg/helpers.py
|
helpers.py
|
py
| 1,000 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "msg.serializers.BaseExpenseCreationSerializer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "msg.serializers.BaseExpensePropertySerializer",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 7,
"usage_type": "name"
}
] |
5589205557
|
import re
import plac
import ujson as json
from utils import load_jsonl_file, dumps_jsonl
regex_ws = re.compile(r'\s+')
def load_corpus(path):
documents = json.load(open(path, 'r'))
return documents
def hydrate(parses, relation):
doc = parses.get(relation['DocID'])
text = doc.get('text', '') if doc else ''
return {
'Arg1': {
'TokenList': relation['Arg1'],
'RawText': ' '.join([text[t[0]:t[1]] for t in relation['Arg1']]),
},
'Arg2': {
'TokenList': relation['Arg2'],
'RawText': ' '.join([text[t[0]:t[1]] for t in relation['Arg2']]),
},
'Connective': {
'TokenList': relation['Connective'],
'RawText': ' '.join([text[t[0]:t[1]] for t in relation['Connective']]),
},
'DocId': relation['DocID']
}
def main(parses_path, relation_path):
corpus = load_corpus(parses_path)
relations = load_jsonl_file(relation_path)
dumps_jsonl(map(lambda r: hydrate(corpus, r), relations))
if __name__ == '__main__':
plac.call(main)
|
rknaebel/bbc-discourse
|
hydrate.py
|
hydrate.py
|
py
| 1,089 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "ujson.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "utils.load_jsonl_file",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "utils.dumps_jsonl",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "plac.call",
"line_number": 43,
"usage_type": "call"
}
] |
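A small self-contained call of hydrate() from hydrate.py above, using an invented one-document corpus; the token lists are (start, end) character offsets into the document text:
parses = {"doc1": {"text": "Bob left because it rained."}}
relation = {"DocID": "doc1", "Arg1": [(0, 8)], "Arg2": [(17, 26)], "Connective": [(9, 16)]}
print(hydrate(parses, relation)["Connective"]["RawText"])   # -> because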
38081298604
|
import asyncio
import threading
from sqlalchemy.orm import Query
from ConsumerService.consumer.persistence import db
from ConsumerService.consumer.business import manage_event_data
from aio_pika import connect, ExchangeType
from flask import Flask, request, jsonify, Response
app = Flask(__name__)
@app.route('/getPatientMedsPeriods')
def get_patient_med_period():
p_id = request.args['p_id']
med = request.args['med']
patient_meds_periods = []
result = manage_event_data.get_patient_med_period(p_id, med)
if isinstance(result,Query):
_len = result.count()
else:
_len = len(result)
if _len > 0:
for r in result:
a = {"p_id": r.p_id, "medication_name": r.medication_name,
"medication_period_start": r.medication_period_start,
"medication_period_end": r.medication_period_end}
patient_meds_periods.append(a)
return jsonify(patient_meds_periods)
else: # No elements in the result --> return NOT_FOUND_404
return Response('No medication periods have been found for patient {} with medication: {}'.format(p_id, med),
404)
@app.route('/getPatientAllMedsPeriods')
def get_patient_all_meds_period():
p_id = request.args['p_id']
patient_meds_periods = []
result = manage_event_data.get_patient_all_meds_periods(p_id)
if isinstance(result,Query):
_len = result.count()
else:
_len = len(result)
if _len > 0:
for r in result:
a = {"p_id": r.p_id, "medication_name": r.medication_name,
"medication_period_start": r.medication_period_start,
"medication_period_end": r.medication_period_end}
patient_meds_periods.append(a)
return jsonify(patient_meds_periods)
else: # No elements in the result --> return NOT_FOUND_404
return Response('No medication periods have been found for patient: {}'.format(p_id), 404)
async def main(loop):
print("Connecting to the PostgreSQL database...")
if not db.is_table_exist():
conn = db.create_tables()
else:
conn = db.get_connection()
connection = await connect(host="localhost",
login="admin",
password="password",
loop=loop
)
# connection = await connect(host=os.environ.get('RABBIT_HOST'),
# login=os.environ.get('RABBIT_USER'),
# password=os.environ.get('RABBIT_PASS'),
# loop=loop
# )
async with connection:
# Creating a channel
channel = await connection.channel()
# Declaring the queue
queue = await channel.declare_queue(name='events', auto_delete=True)
exchange = await channel.declare_exchange("meds", ExchangeType.FANOUT)
routing_key = "new.events"
await queue.bind(exchange, routing_key)
async with queue.iterator() as queue_iter:
async for message in queue_iter:
async with message.process():
event_str = message.body.decode('UTF-8')
manage_event_data.save_event(event_str)
if __name__ == '__main__':
# start flask on separate thread
threading.Thread(target=lambda : app.run(debug=True, use_reloader=False)).start()
# Get the current event loop.
# If there is no current event loop set in the current OS thread,
# the OS thread is main, and set_event_loop() has not yet been called,
# asyncio will create a new event loop and set it as the current one.
loop = asyncio.get_event_loop()
if loop is not None:
loop.run_until_complete(main(loop))
else:
print("Error establishing event loop!")
|
oran1980/clewMedical
|
application-assignment/ConsumerService/consumer/main.py
|
main.py
|
py
| 3,890 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "ConsumerService.consumer.business.manage_event_data.get_patient_med_period",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.business.manage_event_data",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Query",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "ConsumerService.consumer.business.manage_event_data.get_patient_all_meds_periods",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.business.manage_event_data",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Query",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "flask.jsonify",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.persistence.db.is_table_exist",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.persistence.db",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "ConsumerService.consumer.persistence.db.create_tables",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.persistence.db",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "ConsumerService.consumer.persistence.db.get_connection",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.persistence.db",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "aio_pika.connect",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "aio_pika.ExchangeType.FANOUT",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "aio_pika.ExchangeType",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "ConsumerService.consumer.business.manage_event_data.save_event",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "ConsumerService.consumer.business.manage_event_data",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 100,
"usage_type": "call"
}
] |
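A hypothetical client call against the Flask route exposed by the consumer service above; the host, port (Flask's default 5000) and the p_id/med values are placeholders:
import requests
resp = requests.get("http://localhost:5000/getPatientMedsPeriods",
                    params={"p_id": "123", "med": "aspirin"})
print(resp.status_code)
print(resp.json() if resp.ok else resp.text)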
2721867306
|
"""
api.py
~~~~~~
This file defines a simple REST API for a machine learning model
"""
from os import environ as env
from joblib import load
from flask import abort, Flask, jsonify, make_response, request
from pandas import DataFrame
service_name = env['SERVICE_NAME']
version = env['API_VERSION']
model = load('data/model.joblib')
app = Flask(__name__)
@app.route(f'/{service_name}/v{version}/predict', methods=['POST'])
def predict():
"""Predict Incoming Request"""
try:
req = request.json
print(req)
features = DataFrame(req)
prediction = model.predict(features).tolist()
return make_response(jsonify({'prediction': prediction}))
except ValueError:
raise RuntimeError('Features are not in the correct format.')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001)
|
repodevs/flask-machine-learning-service
|
api.py
|
api.py
|
py
| 846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "joblib.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 29,
"usage_type": "call"
}
] |
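A hedged example request against the predict route in api.py above; "my-model" and "1" stand in for the SERVICE_NAME and API_VERSION environment variables, and the feature names are placeholders that must match whatever columns the pickled model was trained on:
import requests
features = [{"feature_a": 5.1, "feature_b": 3.5}]   # placeholder column names
resp = requests.post("http://localhost:5001/my-model/v1/predict", json=features)
print(resp.json())   # -> {"prediction": [...]}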
5145788640
|
import enum
from ocrdgen.font.font import FontManager
from ocrdgen.image.background import BgManager
from pathlib import Path
import numpy as np
from PIL import ImageDraw, Image
from ocrdgen.ops import boxes_ops
import cv2 as cv
from collections import OrderedDict
from .base import BaseDrawer
from ocrdgen import models
class WordDrawer(BaseDrawer):
def __init__(self, image: Image, font, text, xy, align="left", anchor=None,
image_mode="RGBA", fill=(0,0,0)):
super().__init__(image=image, font=font, text=text, xy=xy,
anchor=anchor,
align=align, image_mode=image_mode, fill=fill)
text_test = self.text.strip().split(" ")
# print(len(text_split), text_split)
assert len(text_test) == 1, "Error: expected exactly one word, but more than one word was given!"
def draw_text(self, image=None):
if type(image) == type(None):
image = self.image.copy()
idraw = ImageDraw.Draw(image)
idraw.text(self.xy, self.text, font=self.font, fill=self.fill)
# idraw.textbbox()
return image
def draw_bbox(self, image, color=(255,0,0,255), thick=1):
xmin, ymin, xmax, ymax = self.xymm_with_offset(self.text, self.x, self.y)
np_img = cv.rectangle(np.array(image), (xmin, ymin), (xmax, ymax), color, thick)
return Image.fromarray(np_img)
def draw(self, image=None, draw_bbox=False, bbox_color=(255, 0, 0, 255), bbox_thick=1):
image = self.draw_text(image)
bbox = self.wordbbox()
if draw_bbox:
image = self.draw_bbox(image, color=bbox_color, thick=bbox_thick)
return image, bbox
def wordbbox(self):
wordbox = models.WordBox(text=self.text, bbox=self.textbbox(), chars=self.charbbox())
return wordbox
def charbbox(self):
data = []
xmin, ymin = self.x, self.y
for i in range(len(self.text)):
if len(self.text[i])>0:
xymm = self.xymm_with_offset(self.text[i], xmin, ymin)
xywh = boxes_ops.xymm_to_xywh(xymm)
dt = models.CharBox(char=self.text[i], bbox=xywh, seq_id=i)
# dt = (self.text[i], xywh)
_, _, xmax, _ = xymm
xmin = xmax
data.append(dt)
return data
def draw_char_text(self, image=None):
image = self.draw_text(image)
return image
def draw_char_bbox(self, image, color=(0,255,0,255), thick=1):
image: np.ndarray = np.array(image)
charboxes = self.charbbox()
for idx, charbox in enumerate(charboxes):
char, xywh = charbox.char, charbox.bbox
xmin,ymin,xmax,ymax = boxes_ops.xywh_to_xymm(xywh)
if char!=" ":
# xmin, ymin, xmax, ymax = self.xymm_with_offset(char, x, y)
image = cv.rectangle(image, (xmin, ymin), (xmax, ymax), color, thick)
image: Image = Image.fromarray(image)
return image
def draw_char(self, image=None, draw_bbox=False, bbox_color=(0,255,0,255), bbox_thick=1):
image = self.draw_char_text(image)
bbox = self.charbbox()
if draw_bbox:
image = self.draw_char_bbox(image, color=bbox_color, thick=bbox_thick)
return image, bbox
|
nunenuh/ocrdgen
|
ocrdgen/drawer/word.py
|
word.py
|
py
| 3,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "base.BaseDrawer",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "PIL.Image",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "cv2.rectangle",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "ocrdgen.models.WordBox",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "ocrdgen.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ocrdgen.ops.boxes_ops.xymm_to_xywh",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "ocrdgen.ops.boxes_ops",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "ocrdgen.models.CharBox",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ocrdgen.models",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "ocrdgen.ops.boxes_ops.xywh_to_xymm",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "ocrdgen.ops.boxes_ops",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "cv2.rectangle",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 81,
"usage_type": "call"
}
] |
73831952826
|
from odoo import api, models, fields, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class LgpsPartner(models.Model):
_inherit = 'res.partner'
client_type = fields.Selection(
[
('new', _('New')),
('aftersales', _('After Sales')),
],
default='new',
string=_("Client Type")
)
first_installation_day = fields.Date(
string=_("First Installation Day")
)
custom_status = fields.Selection(
[
('active', _('Active')),
('cancelled', _('Cancelled')),
('demo', _('Demo')),
('inactive', _('Inactive')),
('suspended', _('Suspended')),
('on_negotiation', _('On Negotiation')),
('in_credit_bureau', _('In Credit Bureau')),
],
default='active'
)
client_rank = fields.Selection(
[
('a', 'A'),
('b', 'B'),
('c', 'C'),
('d', 'D'),
('e', 'E'),
],
default='e',
string=_("Client Rank")
)
coordination_executive = fields.Many2one(
comodel_name="res.users",
string=_("Coordination"),
ondelete="set null",
)
credit_collection_executive = fields.Many2one(
comodel_name="res.users",
string=_("Credit and Collection"),
ondelete="set null",
)
after_sales_executive = fields.Many2one(
comodel_name="res.users",
string=_("After Sales"),
ondelete="set null",
)
special_negotiations = fields.Boolean(
string=_("Special Negotiations"),
default=False
)
special_negotiation_notes = fields.Html(
string=_("Special Negotiations Notes")
)
gpsdevice_ids = fields.One2many(
comodel_name="lgps.device",
inverse_name="client_id",
string="Gps Devices",
readonly=True,
)
@api.model
def create(self, values):
if self._check_if_can_create():
new_record = super(LgpsPartner, self).create(values)
return new_record
# def write(self, values):
# if self._check_if_can_create():
# return super(LgpsPartner, self).write(values)
def _check_if_can_create(self):
user = self.env.user
if not user.has_group('lgps.lgps_group_create_contacts'):
raise UserError('Solo personal de Administración y Finanzas puede dar alta de Clientes y Proveedores '
'nuevos.')
return True
|
intralix/odoo-addons
|
lgps/models/custom_partner.py
|
custom_partner.py
|
py
| 2,580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Date",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "odoo._",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Selection",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Many2one",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Boolean",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "odoo.fields.Html",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "odoo._",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "odoo.fields.One2many",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "odoo.fields",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "odoo.exceptions.UserError",
"line_number": 95,
"usage_type": "call"
}
] |
13842222350
|
import cv2
import numpy as np
import math
from numpy import random as nr
import sys
def lines(code=None, step=12):
l = np.zeros((h, w, 3), np.uint8)
l[:] = 255
if code == 0: # - horizontal
for i in range(0, h, step):
l = cv2.line(l, (0, i), (w, i), black)
elif code == 1: # | vertical
for i in range(0, w, step):
l = cv2.line(l, (i, 0), (i, h), black)
elif code == 2: # \ 45
l = lines(code=3, step=step)
l = cv2.flip(l, 0)
elif code == 3: # / 45
for i in range(0, 2*w, step):
l = cv2.line(l, (i, 0), (0, i), black)
elif code == 4: # / 22.5
cotheta = 2.4142
tantheta = 0.4142
for i in range(0, int(w+h*cotheta), step):
l = cv2.line(l, (i, 0), (0, int(i*tantheta)), black)
elif code == 5: # / 67.5
cotheta = 0.4142
tantheta = 2.4142
for i in range(0, int(w+h*cotheta), step):
l = cv2.line(l, (i, 0), (0, int(i*tantheta)), black)
else:
pass # empty
return l
def tsh(img, stage=None, Numberoftsh=None, equalizeHist=False):
type = cv2.THRESH_BINARY
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if equalizeHist == False:
pass
else:
img_gray = cv2.equalizeHist(img_gray, img_gray)
_, th = cv2.threshold(img_gray, 255-int(((stage)/Numberoftsh)*255), 255, type)
th = cv2.cvtColor(th, cv2.COLOR_GRAY2BGR)
return th
def createmasks(img, Numberoftsh=None):
global masks
for i in range(Numberoftsh):
if seqline[i] == 4:
step = 16
elif seqline[i] == 5:
step = 10
else:
step = 8
if masks is not None:
masks = np.append(masks, np.expand_dims(lines(code=seqline[i], step=step), axis=0), axis=0)
else:
masks = lines(code=seqline[i], step=step)
masks = np.expand_dims(masks, axis=0)
#print(masks.shape)
return masks
def crosshatching(img, Numberoftsh=None, equalizeHist=False, color=False):
global frame, flag, w, h
h, w, _ = img.shape
frame = np.zeros((h, w, 3), np.uint8)
frame[:] = 255
if flag is False:
createmasks(img, Numberoftsh=Numberoftsh)
flag = True
for i in range(Numberoftsh):
th = tsh(img, stage=i, Numberoftsh=Numberoftsh, equalizeHist=equalizeHist)
dst = cv2.addWeighted(masks[i], 1, th, 1, 0)
dst = cv2.bitwise_and(dst, frame)
frame = dst
if color is False:
return frame
else:
frame = cv2.bitwise_or(frame, img)
return frame
def showimage(img, Numberoftsh = 7, equalizeHist=False):
global w, h
h, w, _ = img.shape
dst = crosshatching(img, Numberoftsh=Numberoftsh, equalizeHist=equalizeHist, color=True)
#dst = cv2.resize(dst, (int(w/2), int(h/2)))
cv2.imshow('dst', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
def playvideo(video=None, Numberoftsh=None, color=False):
global w, h
if video is None:
cap = cv2.VideoCapture(0)
else:
cap = video
while True:
_, frame = cap.read()
if video is None:
frame = cv2.flip(frame, 1)
frame = crosshatching(frame, Numberoftsh=Numberoftsh, equalizeHist=False, color=color)
cv2.imshow('main', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
black = (0, 0, 0)
white = (255, 255, 255)
#red = (0, 0, 255)
#green = (0, 255, 0)
#blue = (255, 0, 0)
seqline = (-1, 0, 4, 3, 5, 2, 1)
masks = None
flag = False # existence of line masks
if __name__ == "__main__":
if len(sys.argv) > 1:
pass
else:
#img = cv2.imread('eagle.jpg')
#h, w, _ = img.shape
#img = cv2.resize(img, (int(w/8), int(h/8)))
#showimage(img)
#video = cv2.VideoCapture(0)
#video = cv2.VideoCapture('video/Wildlife.wmv')
playvideo(video=None, Numberoftsh=7, color=True)
|
ZZ76/filters
|
crosshatching.py
|
crosshatching.py
|
py
| 3,968 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.equalizeHist",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_GRAY2BGR",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "cv2.addWeighted",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_or",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 126,
"usage_type": "attribute"
}
] |
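A minimal still-image run of the filter in crosshatching.py above, assuming its module-level helpers and cv2 import are in scope; the image path is a placeholder and any BGR image will do (press a key to close the window):
img = cv2.imread("portrait.jpg")        # placeholder path
img = cv2.resize(img, (640, 480))
showimage(img, Numberoftsh=7, equalizeHist=False)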
26822225482
|
import hashlib
import base64
import os
import sys
from Crypto.Cipher import AES
from hashlib import md5
from PyQt4 import QtGui, QtCore
import collections
from eclib import EC
from eclib import DiffieHellman
class MainWindow(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
global to_enc
global dec1
global dec2
global label_ans
self.setGeometry(0, 0, 500, 650)
self.setWindowTitle("Elliptic Curve Cryptography")
self.setWindowIcon(QtGui.QIcon("icon.png"))
self.resize(500, 650)
self.setMinimumSize(500, 650)
self.center()
self.tab_widget = QtGui.QTabWidget()
tab = QtGui.QWidget()
tab2 = QtGui.QWidget()
p3_vertical = QtGui.QVBoxLayout(tab)
self.tab_widget.addTab(tab, "EC Diffie Hellman")
# ECDH GUI DECLARATIONS
labele1 = QtGui.QLabel(" Elliptical Curve EQUATION ")
labele2 = QtGui.QLabel("y^3 = x^2 + ax + b( mod q )")
labele1.setStyleSheet('font-size: 13pt')
labele2.setStyleSheet('font-size: 12pt')
labele1.setAlignment(QtCore.Qt.AlignCenter)
labele2.setAlignment(QtCore.Qt.AlignCenter)
labela = QtGui.QLabel("Enter value of a:")
labelb = QtGui.QLabel("Enter value of b:")
labelc = QtGui.QLabel("Enter value of q (prime):")
label_PrivA = QtGui.QLabel("Enter Private Key of A:")
label_PrivB = QtGui.QLabel("Enter Private Key of B:")
label_result = QtGui.QLabel("ENCODED / DECODED TEXT")
label_result.setStyleSheet('font-size: 12pt')
textEdit = QtGui.QTextEdit()
button_file = QtGui.QPushButton("Import File")
button_encrypt = QtGui.QPushButton("Encrypt")
button_decrypt = QtGui.QPushButton("Decrypt")
button_file.clicked.connect(self.importfile)
button_encrypt.clicked.connect(self.ecdhencrypt)
button_decrypt.clicked.connect(self.ecdhdecrypt)
self.vala = QtGui.QTextEdit()
self.valb = QtGui.QTextEdit()
self.valc = QtGui.QTextEdit()
self.apriv = QtGui.QTextEdit()
self.bpriv = QtGui.QTextEdit()
self.textEdit = QtGui.QTextEdit()
self.vala.setMaximumHeight(labela.sizeHint().height()*1.5)
self.valb.setMaximumHeight(labelb.sizeHint().height()*1.5)
self.valc.setMaximumHeight(labelc.sizeHint().height()*1.5)
self.apriv.setMaximumHeight(label_PrivA.sizeHint().height()*1.5)
self.bpriv.setMaximumHeight(label_PrivB.sizeHint().height()*1.5)
hbox = QtGui.QHBoxLayout()
hbox1 = QtGui.QHBoxLayout()
vbox1 = QtGui.QHBoxLayout()
vbox2 = QtGui.QHBoxLayout()
# GUI LAYOUT
p3_vertical.addWidget(labele1)
p3_vertical.addWidget(labele2)
vbox1.addWidget(labela)
vbox1.addWidget(self.vala)
vbox2.addWidget(labelb)
vbox2.addWidget(self.valb)
hbox1.addLayout(vbox1)
hbox1.addLayout(vbox2)
p3_vertical.addLayout(hbox1)
p3_vertical.addWidget(labelc)
p3_vertical.addWidget(self.valc)
p3_vertical.addWidget(label_PrivA)
p3_vertical.addWidget(self.apriv)
p3_vertical.addWidget(label_PrivB)
p3_vertical.addWidget(self.bpriv)
p3_vertical.addWidget(button_file)
p3_vertical.addWidget(label_result)
p3_vertical.addWidget(self.textEdit)
hbox.addWidget(button_encrypt)
hbox.addWidget(button_decrypt)
p3_vertical.addStretch(1)
p3_vertical.addLayout(hbox)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.tab_widget)
self.setLayout(vbox)
# GUI Functionality
def ecdhencrypt(self):
global A, B, C, PrivA, PrivB
A = int(self.vala.toPlainText())
B = int(self.valb.toPlainText())
C = int(self.valc.toPlainText())
PrivA = int(self.apriv.toPlainText())
PrivB = int(self.bpriv.toPlainText())
txt = data
ec = EC(A, B, C)
g, _ = ec.at(7)
assert ec.order(g) <= ec.q
dh = DiffieHellman(ec, g)
apub = dh.gen(PrivA)
bpub = dh.gen(PrivB)
assert dh.secret(PrivA, bpub) == dh.secret(PrivB, apub)
BLOCK_SIZE = 64
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
x, y = dh.secret(PrivA, apub)
secret = x+y
secret = hashlib.md5(str(secret)).hexdigest()
cipher = AES.new(secret)
encoded = EncodeAES(cipher, txt)
self.textEdit.setText(encoded)
fileName = open('Encrypted.txt', 'w')
fileName.write(encoded)
fileName.close()
def ecdhdecrypt(self):
global A, B, C, PrivA, PrivB
A = int(self.vala.toPlainText())
B = int(self.valb.toPlainText())
C = int(self.valc.toPlainText())
PrivA = int(self.apriv.toPlainText())
PrivB = int(self.bpriv.toPlainText())
txt = data
ec = EC(A, B, C)
g, _ = ec.at(7)
assert ec.order(g) <= ec.q
dh = DiffieHellman(ec, g)
apub = dh.gen(PrivA)
bpub = dh.gen(PrivB)
assert dh.secret(PrivA, bpub) == dh.secret(PrivB, apub)
BLOCK_SIZE = 64
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
x, y = dh.secret(PrivA, apub)
secret = x+y
secret = hashlib.md5(str(secret)).hexdigest()
cipher = AES.new(secret)
decoded = DecodeAES(cipher, txt)
self.textEdit.setText(decoded)
fileName = open('Decrypted.txt', 'w')
fileName.write(decoded)
fileName.close()
def importfile(self):
global data
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home')
f = open(fname, 'r')
with f:
data = f.read()
def center(self):
screen = QtGui.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move(
(screen.width()-size.width())/2, (screen.height()-size.height())/2)
def loadValues(self):
global ec
global eg
idx = self.tab_widget.currentIndex()
if idx == 1:
global g
global pub
ec = EC(a, b, q)
g, _ = ec.at(7)
eg = ElGamal(ec, g)
pub = eg.gen(priv)
print_pub = str(pub[0]) + "," + str(pub[1])
self.elg_key.insertPlainText(print_pub)
app = QtGui.QApplication(sys.argv)
frame = MainWindow()
frame.show()
sys.exit(app.exec_())
|
iCHAIT/Elliptical-Curve-Cryptography
|
gui.py
|
gui.py
|
py
| 6,704 |
python
|
en
|
code
| 24 |
github-code
|
6
|
[
{
"api_name": "PyQt4.QtGui.QWidget",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QWidget.__init__",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QWidget",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QIcon",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTabWidget",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QWidget",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QWidget",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QVBoxLayout",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtCore.Qt",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtCore",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtCore.Qt",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtCore",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QPushButton",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QPushButton",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QPushButton",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QTextEdit",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QHBoxLayout",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QHBoxLayout",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QHBoxLayout",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QHBoxLayout",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QVBoxLayout",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "eclib.EC",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "eclib.DiffieHellman",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "eclib.EC",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "eclib.DiffieHellman",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QFileDialog.getOpenFileName",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QFileDialog",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QDesktopWidget",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "eclib.EC",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui.QApplication",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 189,
"usage_type": "call"
}
] |
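A stand-alone sketch of the pad / base64 / AES round trip that ecdhencrypt and ecdhdecrypt in gui.py above rely on, written against the PyCryptodome API (mode passed explicitly) and with a fixed demo key in place of the md5 of the Diffie-Hellman shared secret:
import base64
from hashlib import md5
from Crypto.Cipher import AES
BLOCK_SIZE = 64
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
key = md5(b"demo shared secret").hexdigest().encode()   # 32 bytes, stand-in for the ECDH-derived key
token = base64.b64encode(AES.new(key, AES.MODE_ECB).encrypt(pad("hello ecc").encode()))
plain = AES.new(key, AES.MODE_ECB).decrypt(base64.b64decode(token)).decode().rstrip(PADDING)
print(plain)   # -> hello ecc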
30984123610
|
""" apps/docs/urls.py """
from django.urls import path
from . import views
app_name = 'docs'
urlpatterns = [
path('', views.index, name='index'),
path('overview/', views.overview, name='overview'),
path('what_is_an_ordo/', views.what_is_an_ordo, name='what_is_an_ordo'),
path('create_an_account/', views.create_an_account, name='create_an_account'),
path('create_a_calendar/', views.create_a_calendar, name='create_a_calendar'),
path('populate_your_calendar/', views.populate_your_calendar,
name='populate_your_calendar'),
path('create_an_ordo/', views.create_an_ordo, name='create_an_ordo'),
path('for_developers/', views.for_developers, name='for_developers'),
]
|
BrRoman/ordomatic
|
ordomatic/apps/docs/urls.py
|
urls.py
|
py
| 709 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
}
] |
42579723690
|
import os
import git
import datetime
import argparse
class was_it_rufus:
"""
A class that instantiates all variables and methods about git status.
...
Methods
-------
Prints the git status
"""
def __init__(self, git_directory):
"""
Constructs all the necessary attributes for the Rufus object.
Parameters
----------
repo : git object
repo object to use for various functionalities
active_branch : str
name of the currently checked-out branch
local_changes : boolean
true if the working tree has uncommitted local changes
recent_commit : boolean
true if the most recent commit is less than one week old
blame_rufus : boolean
true if Rufus authored the most recent commit
"""
self.repo = git.Repo(git_directory)
self.active_branch = self.repo.active_branch.name
self.local_changes = self.repo.is_dirty()
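# a commit authored within the last week counts as "recent"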
self.recent_commit = (datetime.datetime.now().date() - self.repo.head.commit.authored_datetime.date()) < datetime.timedelta(
weeks=1)
self.blame_rufus = self.repo.head.commit.author.name == "Rufus"
def git_status(self):
"""
Prints the details of git status.
Returns
-------
None
"""
print("active branch: ", self.active_branch)
print("local changes: ", self.local_changes)
print("recent commit: ", self.recent_commit)
print("blame Rufus: ", self.blame_rufus)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Was it rufus?')
parser.add_argument('git_directory', type=str, help='name of the git repo directory')
args = parser.parse_args()
#checks if the parsed argument is a directory or not
if os.path.isdir(args.git_directory):
rufus_obj = was_it_rufus(args.git_directory)
rufus_obj.git_status()
else:
print(args.git_directory, "Invalid directory")
|
srirammura/was_it_rufus
|
main.py
|
main.py
|
py
| 2,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "git.Repo",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
}
] |
5001531967
|
from datetime import datetime
from django import template
from tag.models import Tag
register = template.Library()
@register.inclusion_tag('toptags.html')
def toptags():
tags=Tag.objects.all().order_by('-followers_count')[:5]
args={}
args['tags']=tags
return args
@register.inclusion_tag('trendingtags.html')
def trending_tags():
today = datetime.now()
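# trending tags: the most-followed tags created in the current month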
tags=Tag.objects.filter(create__year=today.year,create__month=today.month).order_by('-followers_count')[:5]
args={}
args['tags']=tags
return args
@register.inclusion_tag('mytags.html',takes_context=True)
def mytags(context):
request=context['request']
tags=request.user.profile.follow_tags.all()
args={}
args['tags']=tags
return args
|
duonghau/hoidap
|
tag/templatetags/tag_template.py
|
tag_template.py
|
py
| 745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.template.Library",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "tag.models.Tag.objects.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tag.models.Tag.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tag.models.Tag",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tag.models.Tag.objects.filter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tag.models.Tag.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "tag.models.Tag",
"line_number": 16,
"usage_type": "name"
}
] |
1059675909
|
"""
This module defines the interface for the Server.
.. autoclass:: Server
:members:
:undoc-members:
:show-inheritance:
"""
import atexit
import base64
import logging
import os
import threading
from functools import partial, wraps
import pluginbase
import tornado.httpserver
import tornado.web
from flask import (Flask, flash, jsonify, redirect, render_template, request,
send_from_directory, session, url_for)
from flask_babel import Babel
from flask_login import current_user, logout_user
from sockjs.tornado import SockJSRouter
from tornado import web
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
from opsoro.apps import Apps
from opsoro.console_msg import *
from opsoro.expression import Expression
from opsoro.preferences import Preferences
from opsoro.robot import Robot
from opsoro.server.request_handlers import RHandler
from opsoro.users import SocketConnection, Users
# Helper function
get_path = partial(os.path.join, os.path.abspath(os.path.dirname(__file__)))
class Server(object):
def __init__(self):
self.request_handler = RHandler(self)
# Create flask instance for webserver
self.flaskapp = Flask(__name__)
# self.flaskapp.config['DEBUG'] = True
self.flaskapp.config['TEMPLATES_AUTO_RELOAD'] = True
# Translation support
self.flaskapp.config.from_pyfile('settings.cfg')
self.babel = Babel(self.flaskapp)
# Setup key for sessions
self.flaskapp.secret_key = "5\x075y\xfe$\x1aV\x1c<A\xf4\xc1\xcfst0\xa49\x9e@\x0b\xb2\x17"
# Setup login manager
Users.setup(self.flaskapp)
# Setup app system
Apps.register_apps(self)
# self.activeapp = None
# Initialize all URLs
self.request_handler.set_urls()
# Run stop function at exit
atexit.register(self.at_exit)
def at_exit(self):
print_info('Goodbye!')
# Sleep robot
Robot.sleep()
Apps.stop_all()
if threading.activeCount() > 0:
threads = threading.enumerate()
for thread in threads:
try:
thread.stop()
thread.join()
except AttributeError:
pass
def render_template(self, template, **kwargs):
return self.request_handler.render_template(template, **kwargs)
def run(self):
# Setup SockJS
flaskwsgi = WSGIContainer(self.flaskapp)
self.socketrouter = SockJSRouter(SocketConnection, '/sockjs')
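# SockJS traffic is served at /sockjs; every other request falls back to the Flask WSGI app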
tornado_app = tornado.web.Application(self.socketrouter.urls + [(r".*", tornado.web.FallbackHandler, {"fallback": flaskwsgi})])
tornado_app.listen(80)
# Wake up robot
Robot.wake()
# Start default app
startup_app = Preferences.get('general', 'startup_app', None)
if startup_app in Apps.apps:
self.request_handler.page_openapp(startup_app)
# SSL security
# http_server = tornado.httpserver.HTTPServer(tornado_app, ssl_options={
# "certfile": "/etc/ssl/certs/server.crt",
# "keyfile": "/etc/ssl/private/server.key",
# })
# http_server.listen(443)
try:
# ioloop.PeriodicCallback(UserSocketConnection.dump_stats, 1000).start()
IOLoop.instance().start()
except KeyboardInterrupt:
print_info('Keyboard interrupt')
self.at_exit()
def shutdown(self):
logging.info("Stopping server")
io_loop = IOLoop.instance()
io_loop.stop()
def protected_view(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
if current_user.is_authenticated:
if current_user.is_admin:
# the actual page
return f(*args, **kwargs)
else:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
else:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
return wrapper
def app_view(self, f):
appname = f.__module__.split(".")[-1]
@wraps(f)
def wrapper(*args, **kwargs):
# Protected page
if current_user.is_authenticated:
if not current_user.is_admin:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
else:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
# Check if app is active
if appname in Apps.active_apps:
# This app is active
return f(*args, **kwargs)
else:
# Return app not active page
assert appname in Apps.apps, "Could not find %s in list of loaded apps." % appname
data = {
"app": {},
# "appname": appname,
"page_icon": Apps.apps[appname].config["icon"],
"page_caption": Apps.apps[appname].config["full_name"]
}
data["title"] = self.request_handler.title
# if self.activeapp in Apps.apps:
# # Another app is active
# data["app"]["active"] = True
# data["app"]["name"] = Apps.apps[self.activeapp].config["full_name"]
# data["app"]["icon"] = Apps.apps[self.activeapp].config["icon"]
# data["title"] += " - %s" % Apps.apps[self.activeapp].config["full_name"]
# else:
# # No app is active
# data["app"]["active"] = False
return render_template("app_not_active.html", **data)
return wrapper
def app_api(self, f):
appname = f.__module__.split(".")[-1]
@wraps(f)
def wrapper(*args, **kwargs):
# Protected page
if current_user.is_authenticated:
if not current_user.is_admin:
return jsonify(status="error", message="You do not have permission to access the requested page.")
else:
return jsonify(status="error", message="You do not have permission to access the requested page.")
# Check if app is active
if appname in Apps.active_apps:
# This app is active
data = f(*args, **kwargs)
if data is None:
data = {}
if "status" not in data:
data["status"] = "success"
return jsonify(data)
else:
# Return app not active page
assert appname in Apps.apps, "Could not find %s in list of loaded apps." % appname
return jsonify(status="error", message="This app is not active.")
return wrapper
|
OPSORO/OS
|
src/opsoro/server/__init__.py
|
__init__.py
|
py
| 7,251 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "functools.partial",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "opsoro.server.request_handlers.RHandler",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask_babel.Babel",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "opsoro.users.Users.setup",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "opsoro.users.Users",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "opsoro.apps.Apps.register_apps",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "atexit.register",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "opsoro.robot.Robot.sleep",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "opsoro.robot.Robot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "opsoro.apps.Apps.stop_all",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "threading.activeCount",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "threading.enumerate",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "tornado.wsgi.WSGIContainer",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "sockjs.tornado.SockJSRouter",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "opsoro.users.SocketConnection",
"line_number": 95,
"usage_type": "argument"
},
{
"api_name": "tornado.httpserver.web.Application",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tornado.httpserver.web",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "tornado.httpserver",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "opsoro.robot.Robot.wake",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "opsoro.robot.Robot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "opsoro.preferences.Preferences.get",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "opsoro.preferences.Preferences",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "opsoro.apps.Apps.apps",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "tornado.ioloop.IOLoop.instance",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.IOLoop",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.IOLoop.instance",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop.IOLoop",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.is_authenticated",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.is_admin",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user.is_authenticated",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.is_admin",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "opsoro.apps.Apps.active_apps",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "opsoro.apps.Apps.apps",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "opsoro.apps.Apps.apps",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "opsoro.apps.Apps.apps",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "flask_login.current_user.is_authenticated",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "flask_login.current_user.is_admin",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "flask_login.current_user",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "opsoro.apps.Apps.active_apps",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "opsoro.apps.Apps.apps",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "opsoro.apps.Apps",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 188,
"usage_type": "call"
}
] |
41728763711
|
import os
import datetime
import time
# requires import of opencv through pip
# pip install opencv-python
import cv2
# requires import of PIL pillow through pip
# python -m pip install pillow
from PIL import Image, ImageTk
import sys
import tkinter
def my_VidFunction(vid_name):
cap = cv2.VideoCapture(vid_name)
#check if the video capture is open
if(cap.isOpened() == False):
print("Error Opening Video Stream Or File")
while(cap.isOpened()):
ret, frame =cap.read()
if ret == True:
cv2.namedWindow('frame', cv2.WINDOW_KEEPRATIO)
cv2.setWindowProperty('frame',cv2.WND_PROP_ASPECT_RATIO,cv2.WINDOW_KEEPRATIO)
#cv2.namedWindow('frame', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('frame', frame)
if cv2.waitKey(25) == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
def showPIL(pilImage):
root = tkinter.Tk()
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (w, h))
root.focus_set()
root.bind("<Escape>", lambda e: (e.widget.withdraw(), e.widget.quit()))
canvas = tkinter.Canvas(root,width=w,height=h)
canvas.pack()
canvas.configure(background='black')
imgWidth, imgHeight = pilImage.size
if imgWidth > w or imgHeight > h:
ratio = min(w/imgWidth, h/imgHeight)
imgWidth = int(imgWidth*ratio)
imgHeight = int(imgHeight*ratio)
pilImage = pilImage.resize((imgWidth,imgHeight), Image.ANTIALIAS)
image = ImageTk.PhotoImage(pilImage)
imagesprite = canvas.create_image(w/2,h/2,image=image)
root.after(2000, root.destroy)
root.mainloop()
#grab time now
now = datetime.datetime.now()
print('Press Ctrl and C in the terminal or command window to exit the slide show.')
print('Make sure default image viewer opens full screen. Then close it when full screen.')
im = Image.open(r"black_all.png")
showPIL(im)
while (True):
# open method used to open different extension image file
# resized_im = im.resize((round(im.size[0]*4), round(im.size[1]*4)))
# This method will show image in any image viewer
# resized_im.show()
if ((now.hour >= 8) and (now.hour <= 22 )):
if ((now.hour == 12) and ((now.minute % 30 == 0) or (now.minute % 30 == 1))) or ((now.hour == 17) and ((now.minute % 60 == 0) or (now.minute % 60 == 1))):
my_VidFunction('ShiftChangeYouTubeHIGH.mp4')
# grab the time again so the next loop iteration uses a fresh timestamp
now = datetime.datetime.now()
else:
my_VidFunction('cci_icommons_album.mp4')
# grab the time again so the next loop iteration uses a fresh timestamp
now = datetime.datetime.now()
else:
showPIL(im)
# close the image
#im.close()
#resized_im.close()
|
icommonscrc/Looney-Toon
|
OpenVideoAtTimeV8.py
|
OpenVideoAtTimeV8.py
|
py
| 2,716 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_KEEPRATIO",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.setWindowProperty",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_ASPECT_RATIO",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.WINDOW_KEEPRATIO",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.setWindowProperty",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.WND_PROP_FULLSCREEN",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "cv2.WINDOW_FULLSCREEN",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 84,
"usage_type": "attribute"
}
] |
35023516423
|
import cv2
import time
from base_camera import BaseCamera
from Process import Process
global sess
class Camera(BaseCamera):
video_source = 0
process = Process()
@staticmethod
def set_video_source(source):
Camera.video_source = source
# @staticmethod
def frames(self):
camera = cv2.VideoCapture(Camera.video_source)
video_FourCC = int(camera.get(cv2.CAP_PROP_FOURCC))
video_fps = camera.get(cv2.CAP_PROP_FPS)
video_size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
if not camera.isOpened():
raise RuntimeError('Could not start camera.')
print("!!! TYPE:", type(video_FourCC), type(video_fps), type(video_size))
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
font = cv2.FONT_HERSHEY_SIMPLEX
# prev_time = timer()
in_num = 0
out_num = 0
num = 0
data = {}
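# data maps 'x,y,w,h,frame_num' keys to [first bbox, latest bbox] so detections can be matched across frames by overlap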
while True:
# read current frame
print(in_num, out_num, "in_numin_numin_numin_numin_numin_numoutin_numin_numin_numin_numin_num")
_, img = camera.read()
if img is None:
break
result, centers = self.process.process(img)
for center in centers:
x, y, w, h = center
# cv2.circle(result,(x,y), 30, (0,0,255), -1)
if len(data) == 0:
data[f'{x},{y},{w},{h},{num}'] = [x, y, w, h, x, y, w, h] # first detection point, latest detection point
continue
for key in list(data.keys()):
tx, ty, tw, th, tn = key.split(',')
tx, ty, tw, th, tn = int(tx), int(ty), int(tw), int(th), int(tn)
if num - tn > 4:
del data[key]
continue
else:
print('distance', self.process.overlap([x, y, w, h], [tx, ty, tw, th]))
if self.process.overlap([x, y, w, h], [tx, ty, tw, th]) > 0.5:
value = data[key]
value[4], value[5], value[6], value[7] = x, y, w, h
del data[key]
data[f'{x},{y},{w},{h},{num}'] = value
else:
data[f'{x},{y},{w},{h},{num}'] = [x, y, w, h, x, y, w, h]
for key in list(data.keys()):
value = data[key]
y1 = value[1] + value[3] // 2
y2 = value[5] + value[7] // 2
# print(y1,y2,"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
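# a tracked centre crossing the y=700 line downward counts as "out", upward as "in"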
if y1 < 700 and y2 >= 700:
del data[key]
out_num += 1
continue
elif y1 > 700 and y2 < 700:
del data[key]
in_num += 1
continue
elif num == video_fps:
num = 0
tx, ty, tw, th, tn = key.split(',')
if video_fps - int(tn) > 4:
del data[key]
continue
else:
del data[key]
data[f'{tx},{ty},{tw},{th},{num}'] = value
cv2.line(result, (0, 700), (800, 700), (0, 0, 255), 5)
cv2.putText(result, f'in: {in_num} out: {out_num}', (50, 780), font, 1.5, (0, 0, 255), 2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print(data.keys(), "data.keys()data.keys()data.keys()")
# encode as a jpeg image and return it
yield cv2.imencode('.jpg', img)[1].tobytes()
|
Micbetter/ISense-flow
|
camera_opencv.py
|
camera_opencv.py
|
py
| 3,981 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "base_camera.BaseCamera",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "Process.Process",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FOURCC",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WIDTH",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_HEIGHT",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "cv2.line",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.imencode",
"line_number": 97,
"usage_type": "call"
}
] |
8665817734
|
import os
import boto3
from elasticsearch import Elasticsearch
from unittest import TestCase
from me_articles_drafts_delete import MeArticlesDraftsDelete
from tests_util import TestsUtil
class TestMeArticlesDraftsDelete(TestCase):
dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:4569/')
elasticsearch = Elasticsearch(
hosts=[{'host': 'localhost'}]
)
@classmethod
def setUpClass(cls):
TestsUtil.set_all_tables_name_to_env()
os.environ['DOMAIN'] = 'example.com'
cls.article_info_table = cls.dynamodb.Table('ArticleInfo')
cls.article_content_table = cls.dynamodb.Table('ArticleContent')
cls.article_content_edit_table = cls.dynamodb.Table('ArticleContentEdit')
cls.article_history_table = cls.dynamodb.Table('ArticleHistory')
cls.tag_table = cls.dynamodb.Table('Tag')
def setUp(self):
TestsUtil.delete_all_tables(self.dynamodb)
article_info_items = [
{
'article_id': 'draftId00001',
'user_id': 'test01',
'status': 'draft',
'tags': ['a', 'b', 'c'],
'eye_catch_url': 'https://' + os.environ['DOMAIN'] + '/00001.png',
'sort_key': 1520150272000000,
'version': 2
},
{
'article_id': 'draftId00002',
'user_id': 'test01',
'status': 'public',
'sort_key': 1520150272000000,
'version': 2
}
]
TestsUtil.create_table(self.dynamodb, os.environ['ARTICLE_INFO_TABLE_NAME'], article_info_items)
def tearDown(self):
TestsUtil.delete_all_tables(self.dynamodb)
def assert_bad_request(self, params):
me_articles_drafts_publish_with_header = MeArticlesDraftsDelete(params, {}, dynamodb=self.dynamodb)
response = me_articles_drafts_publish_with_header.main()
self.assertEqual(response['statusCode'], 400)
def test_main_ok(self):
params = {
'pathParameters': {
'article_id': 'draftId00001'
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
response = MeArticlesDraftsDelete(params, {}, dynamodb=self.dynamodb).main()
self.assertEqual(response['statusCode'], 200)
article_info = self.article_info_table.get_item(Key={'article_id': params['pathParameters']['article_id']})['Item']
self.assertEqual(article_info['status'], 'delete')
def test_main_ng_with_public_article(self):
params = {
'pathParameters': {
'article_id': 'publicId00002'
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
response = MeArticlesDraftsDelete(params, {}, dynamodb=self.dynamodb).main()
self.assertEqual(response['statusCode'], 400)
def test_validation_with_no_article_id(self):
params = {
'queryStringParameters': {},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
self.assert_bad_request(params)
def test_validation_article_id_max(self):
params = {
'queryStringParameters': {
'article_id': 'A' * 13,
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
self.assert_bad_request(params)
def test_validation_article_id_min(self):
params = {
'queryStringParameters': {
'article_id': 'A' * 11,
},
'requestContext': {
'authorizer': {
'claims': {
'cognito:username': 'test01',
'phone_number_verified': 'true',
'email_verified': 'true'
}
}
}
}
self.assert_bad_request(params)
|
AlisProject/serverless-application
|
tests/handlers/me/articles/drafts/delete/test_me_articles_drafts_delete.py
|
test_me_articles_drafts_delete.py
|
py
| 4,930 |
python
|
en
|
code
| 54 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "boto3.resource",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tests_util.TestsUtil.set_all_tables_name_to_env",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tests_util.TestsUtil",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tests_util.TestsUtil.delete_all_tables",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tests_util.TestsUtil",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "tests_util.TestsUtil.create_table",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tests_util.TestsUtil",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tests_util.TestsUtil.delete_all_tables",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tests_util.TestsUtil",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "me_articles_drafts_delete.MeArticlesDraftsDelete",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "me_articles_drafts_delete.MeArticlesDraftsDelete",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "me_articles_drafts_delete.MeArticlesDraftsDelete",
"line_number": 97,
"usage_type": "call"
}
] |
23225332326
|
import copy
import six
from lxml import etree
from ems.exceptions import SchemaException
from ems.exceptions import ValidationException
from ems.exceptions import XMLException
from ems.schema import fields
def parse_meta(name, bases, dct):
"""
Parse the _META_ attribute from a schema definition.
"""
# Set default metadata
schema_meta = {
# The XML tag for the root element.
'tag': name,
# Validate the schema values when rendering.
'validate': True,
# Always omit empty values for fields which are not required.
'omit_empty': False,
# Omit strings which are equal to ''
'omit_blank': False,
# Validation fails for Choice types with more than one value populated.
'strict_choice': False,
}
# Sets up defaults if the _META_ attribute is not found.
if '_META_' in dct:
for k, v in six.iteritems(dct['_META_']):
schema_meta[k] = v
# Remove original definition
del dct['_META_']
dct['_schema_meta'] = schema_meta
def parse_fields(name, bases, dct):
"""
Parse the _SCHEMA_ attribute and set up the appropriate methods for each
defined field.
"""
if '_SCHEMA_' not in dct:
raise SchemaException('No _SCHEMA_ attribute found for %s' % name)
# Holds the fields from the schema definition
schema_fields = {}
# Holds a reverse lookup to the fields from their tag names. Used when
# parsing an XML into an object.
field_lookup = {}
for k, v in six.iteritems(dct['_SCHEMA_']):
if not isinstance(v, dict):
raise SchemaException('Schema definitions must be dict objects')
# Default to string type.
field_type = v.get('type', 'string')
# Tag name defaults to field name.
if 'tag' not in v:
v['tag'] = k
# Get field class from factory helper method.
schema_fields[k] = fields.factory(field_type, **v)
# Lookup for the XML tag -> attribute name
field_lookup[v['tag']] = k
# Create new property functions for the field values.
# Functions are wrapped to force 'k' to be evaluated now. Otherwise k
# will always be the value of the last element in the loop.
def wrap_get_f(k=k):
def get_f(self):
return self._schema_fields[k].value
return get_f
def wrap_set_f(k=k):
def set_f(self, value):
self._schema_fields[k].value = value
return set_f
dct[k] = property(wrap_get_f(), wrap_set_f())
# Remove the original schema definition and add the new one
del dct['_SCHEMA_']
dct['_schema_fields'] = schema_fields
dct['_field_lookup'] = field_lookup
def webservice_meta(**kwds):
"""
Class decorator for creating new web service objects.
Takes _META_ args as keyword arguments.
"""
def wrapper(cls):
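# rebuild the class through WebServiceMeta, copying its namespace without __dict__, __weakref__ and slot entries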
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if orig_vars.get('_META_', None) is None:
orig_vars['_META_'] = {}
if kwds is not None:
for k, v in six.iteritems(kwds):
orig_vars['_META_'][k] = v
return WebServiceMeta(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class WebServiceMeta(type):
"""
Metaclass used to create new WebService objects from a schema, defined
as a dictionary.
"""
def __new__(meta, name, bases, dct):
# Sets up the _schema_meta attribute.
parse_meta(name, bases, dct)
# Sets up the _schema_fields attribute.
parse_fields(name, bases, dct)
return super(WebServiceMeta, meta).__new__(meta, name, bases, dct)
class WebServiceObject(object):
"""
Base class for objects to be serialized/deserialized for the API. Also
used by nested objects.
Subclasses should also use the WebServiceMeta metaclass.
"""
def __init__(self, **kwds):
# Ensure that the field values are NOT shared across instances of the
# same class.
self._schema_fields = copy.deepcopy(self._schema_fields)
# Allow any field to be set via keyword arguments
for k, v in six.iteritems(kwds):
if k in self._schema_fields:
self._schema_fields[k].value = v
else:
raise TypeError('%s got unexpected keyword argument %s' %
(self.__class__.__name__, k))
def validate(self):
"""
Checks whether the values are valid for the class schema.
Returns None if valid. Otherwise, raises ValidationException.
"""
for field in six.itervalues(self._schema_fields):
field.validate()
def is_valid(self):
"""
Convenience wrapper for validate() to return True or False.
"""
try:
self.validate()
except ValidationException:
return False
return True
def to_element(self, root_element=None):
"""
Returns the object as an lxml.Element instance.
"""
if root_element is None:
root_element = etree.Element(self._schema_meta['tag'])
for field in six.itervalues(self._schema_fields):
if self._schema_meta['validate']:
field.validate()
children = field.to_element(
omit_empty=self._schema_meta['omit_empty'],
omit_blank=self._schema_meta['omit_blank'])
# Append each child element if the rendered elements form a list.
# This means that each child gets a root tag. E.g.
# <attribute>
# <a>1</a>
# </attribute>
# <attribute>
# <a>2</a>
# </attribute>
if isinstance(children, list):
[root_element.append(elem)
for elem
in children]
elif children is not None:
root_element.append(children)
return root_element
def render(self, pretty=False):
"""
Renders the object into an XML string representation.
"""
element = self.to_element()
return etree.tostring(element, pretty_print=pretty)
@classmethod
def parse(cls, root_element, strict=True):
"""
Returns a new instance of the class from an XML.
"""
# New instance of myself to return
obj = cls()
for elem in root_element:
attr_name = cls._field_lookup.get(elem.tag, None)
if attr_name is None and strict:
raise XMLException('Unexpected element: %s' % elem.tag)
elif attr_name is None:
continue
# Field objects should provide a parse method
obj._schema_fields[attr_name].parse(elem)
return obj
@classmethod
def from_file(cls, filename, strict=True):
"""
Parse an XML from a file.
"""
tree = etree.parse(filename)
root = tree.getroot()
return cls.parse(root, strict=strict)
@classmethod
def from_text(cls, text, strict=True):
"""
Parse an XML from a string.
"""
bytes_ = six.BytesIO(text.encode('utf-8'))
return cls.from_file(bytes_, strict=strict)
|
ceramyq/python-ems
|
ems/schema/base.py
|
base.py
|
py
| 7,670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "six.iteritems",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "ems.exceptions.SchemaException",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "ems.exceptions.SchemaException",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "ems.schema.fields.factory",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "ems.schema.fields",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "six.iteritems",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "six.itervalues",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "ems.exceptions.ValidationException",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "lxml.etree.Element",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "six.itervalues",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "lxml.etree.tostring",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "ems.exceptions.XMLException",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "lxml.etree.parse",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "six.BytesIO",
"line_number": 262,
"usage_type": "call"
}
] |
40106131495
|
from .auto import Auto
import gettext
def russian(text):
text = text.replace("usage:",
"использование:")
text = text.replace("show this help message and exit",
"показывает это сообщение и выходит")
text = text.replace("error:",
"ошибка:")
text = text.replace("the following arguments are required",
"требуются следующие аргументы")
text = text.replace("argument ",
"аргумент ")
text = text.replace("invalid choice",
"недопустимый вариант")
text = text.replace("choose from ",
"выберите из следующих ")
return text
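# monkey-patch gettext before importing argparse so its built-in messages are shown in Russian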
gettext.gettext = russian
import argparse
gettext.bindtextdomain("argparse", "")
gettext.textdomain("argparse")
DESCRIPTION = """Система сборки Auto"""
def cli(function):
"""
Decorator that creates a command-line utility from a function.
:param function: the result of the configure decorator
Usage:
~~~~~~~~
::
@cli
@configure(**kwargs)
def test():
...
All build targets become available for execution:
*python source.py -h*
"""
manager: Auto = function()
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser._positionals.title = "Позиционные аргументы"
parser._optionals.title = "Опции"
choices = manager.targets.keys()
parser.add_argument("target", type=str, help="цель сборки", choices=choices)
namespace = parser.parse_args()
manager.execute(namespace.target)
|
Papr1ka/config
|
practice4/auto/cli.py
|
cli.py
|
py
| 1,875 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "gettext.gettext",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "gettext.bindtextdomain",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "gettext.textdomain",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "auto.Auto",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 52,
"usage_type": "call"
}
] |
21529376169
|
from .base_view import ClassView
def get_model_value(instance, field):
try:
value = getattr(instance, field)
except Exception:
if field.find('__') > 0:
fields = field.split('__')
elif field.find('.') > 0:
fields = field.split('.')
else:
raise Exception()
value = instance
for field in fields:
value = getattr(value, field)
if value is None:
return None
return value
class ModelListView(ClassView):
fields = None
has_pagination = True
default_per_page = 10
def __init__(self, request):
ClassView.__init__(self, request)
get = request.GET.get
self.per_page = get("per_page", self.default_per_page)
self.per_page = int(self.per_page)
self.page = get("page", 1)
self.page = int(self.page)
self.search = get("search", "")
self.sort_param = get("sort", None)
self.data = {}
self.json_string = None
self.validate_fields()
def validate_fields(self):
fields = self.fields
if type(fields) not in (list, tuple):
raise Exception("Fields format is not valid.")
new_fields = []
for field in fields:
if type(field) in (list, tuple):
if len(field) > 2:
raise Exception("Fields format is not valid.")
field, json_key = field
else:
field, json_key = field, field
new_fields.append((field, json_key))
self.fields = new_fields
def get_query(self):
raise NotImplementedError()
def to_json(self, record):
fields = self.fields
data = {}
for field, json_key in fields:
fn = getattr(self, 'render_' + field, None)
if fn is not None:
value = fn(record)
else:
value = get_model_value(record, field)
data[json_key] = value
return data
# noinspection PyBroadException
def sort(self, query):
sort = self.sort_param
if not sort:
return query
try:
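# the sort parameter is expected as "field|asc"; any other direction sorts descending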
param, asc = sort.split("|")
if asc != "asc":
param = "-" + param
return query.order_by(param)
except Exception:
return query
def get_count(self, query):
from django.db.models.query import QuerySet
if type(query) is QuerySet:
return query.count()
return len(query)
def process(self, request):
query = self.get_query()
fields = [a for a, b in self.fields]
if len(fields) > 0:
query = query.only(*fields)
query = self.sort(query)
if self.has_pagination:
data = self.paginate(query)
else:
data = [self.to_json(record) for record in query]
self.add("status", True)
self.add('total', self.get_count(query))
self.add("data", data)
def paginate(self, query):
from django.core.paginator import Paginator
if not self.has_pagination:
return
paginator = Paginator(query, self.per_page)
page = paginator.page(self.page)
records = page.object_list
data = [self.to_json(record) for record in records]
self.add("last_page", paginator.num_pages)
self.add("from", page.start_index())
self.add("current_page", self.page)
self.add("per_page", self.per_page)
self.add("to", page.end_index())
return data
|
sajithak52/store-django-app
|
myproject/base_class/views/list_view.py
|
list_view.py
|
py
| 3,646 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "base_view.ClassView",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "base_view.ClassView.__init__",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "base_view.ClassView",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.query.QuerySet",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 129,
"usage_type": "call"
}
] |
31333928448
|
#!/usr/bin/env python3
import atexit
import copy
import datetime
import json
import os
import re
import sys
import threading
import botocore
from flask import Flask
from prometheus_client import make_wsgi_app, Gauge
from pyemvue import PyEmVue
from pyemvue.enums import Scale, Unit
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple
poller_thread = threading.Thread()
minutesInAnHour = 60
wattsInAKw = 1000.0
USAGE_WATTS = Gauge('per_min_usage_total', 'Total usage for channel in watts.', labelnames=['channel', 'channel_num', 'device_name', 'device_gid', ], unit="watt")
EXCLUDED_CHANNELS = ['Balance', 'TotalUsage', ]
devices = {}
def log(level, msg):
now = datetime.datetime.utcnow()
print('{} | {} | {}'.format(now, level.ljust(5), msg), flush=True)
def debug(msg):
log("INFO", msg)
def info(msg):
log("INFO", msg)
def error(msg):
log("ERROR", msg)
def die():
global poller_thread
error('Caught exit signal')
try:
poller_thread.cancel()
except Exception as e:
pass
info('Shutting down.')
sys.exit(0)
def handle_exit(signum, frame):
die()
# get usage for each device
def get_usage_data(device_names, device):
device_name = device_names.get(device.device_gid, 'Unknown Device')
info(f'Device: #{device.device_gid} "{device_name}" has {len(device.channels.items())} channels.')
# Recurse thru the various channels, gathering rosebuds
for number, channel in device.channels.items():
if number in EXCLUDED_CHANNELS:
debug(f'Excluding data from channel "{number}".')
continue
if channel.nested_devices:
for gid, dev in channel.nested_devices.items():
debug(f'Recursing into nested devices for channel "{number}".')
get_usage_data(device_names, dev)
kwhUsage = channel.usage
if kwhUsage is not None:
channel_label = re.sub(r'_+', '_', re.sub(r'[^a-z0-9_]', '_', channel.name.lower(), flags=re.I | re.M))
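# convert the per-minute kWh reading into an average power draw in watts (kWh * 1000 W/kW * 60 min/h)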
watts = wattsInAKw * minutesInAnHour * kwhUsage
USAGE_WATTS.labels(channel_label, number, device_name, device.device_gid).set(watts)
info(f'Channel #{number} - {channel.name} recorded as {channel_label}.')
# Thread to poll emporia for data
def poll_emporia(vue=None, retry_login=False, devices={}, poll_interval=60):
global poller_thread
# retry login if needed
if retry_login:
try:
info('logging in')
vue.login(username=os.environ.get('VUE_USERNAME'), password=os.environ.get('VUE_PASSWORD'))
info('successfully logged in')
except Exception as e:
error(f'Exception occurred during login: {e}')
info(f'skipping run and trying again in {poll_interval} seconds')
poller_thread = threading.Timer(poll_interval, poll_emporia, kwargs={"vue":vue, "retry_login":True, "devices":devices, "poll_interval":60,} )
poller_thread.start()
return
try:
device_list = vue.get_devices()
info(f'found {len(device_list)} devices.')
# give the system time to catch up with data
timestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=15)
device_names = dict(
filter(lambda x: x[1],
map(lambda x:(x.device_gid, x.device_name), device_list)))
# get the usage
device_usage = vue.get_device_list_usage(list(map(lambda d: d.device_gid, device_list)), timestamp, scale=Scale.MINUTE.value, unit=Unit.KWH.value)
if not device_usage:
return
for gid, device in device_usage.items():
get_usage_data(device_names, device)
info(f'Finished polling run; next run in {poll_interval} seconds.')
poller_thread = threading.Timer(poll_interval, poll_emporia, kwargs={"vue":vue, "retry_login":False, "devices":devices, "poll_interval":60,} )
poller_thread.start()
except Exception as e:
error(f'Exception occurred: {e}')
info('restarting poll with login retry after 5s.')
poller_thread = threading.Timer(5, poll_emporia, kwargs={"vue":vue, "retry_login":True, "devices":devices, "poll_interval":60,} )
poller_thread.start()
return
def create_app(devices):
global poller_thread
app = Flask(__name__)
poll_interval = 60
vue = PyEmVue()
info(f'Launching first poll.')
poller_thread = threading.Timer(1, poll_emporia, kwargs={"vue":vue, "retry_login":True, "devices":devices, "poll_interval":60,} )
poller_thread.start()
atexit.register(handle_exit)
return app
deviceFilename = os.environ.get('VUE_DEVICES_FILE')
if deviceFilename:
try:
with open(deviceFilename) as deviceFile:
devices = json.load(deviceFile)
except FileNotFoundError:
info(f'No device list file found at {deviceFilename}')
try:
app = create_app(devices.get('devices', {}))
except:
error('Unable to log in - check VUE_USERNAME/VUE_PASSWORD')
sys.exit(-2)
# add /metrics prom dumper
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
'/metrics': make_wsgi_app()
})
|
thebaron/vueprom
|
src/vueprom.py
|
vueprom.py
|
py
| 5,230 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "threading.Thread",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "prometheus_client.Gauge",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "re.M",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "threading.Timer",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pyemvue.enums.Scale.MINUTE",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pyemvue.enums.Scale",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "pyemvue.enums.Unit.KWH",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pyemvue.enums.Unit",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "threading.Timer",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pyemvue.PyEmVue",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "threading.Timer",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "atexit.register",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "werkzeug.middleware.dispatcher.DispatcherMiddleware",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "prometheus_client.make_wsgi_app",
"line_number": 164,
"usage_type": "call"
}
] |
23932718939
|
import os
import numpy as np
import pandas as pd
from variables import csv_path, label_encode, file_name, cutoff
from sklearn.utils import shuffle
def preprocess_data(csv_path):
df = pd.read_csv(csv_path)
df = df.copy()
df = df.dropna(axis=0, how='any')  # reassign so rows with missing values are actually dropped
df['label'] = df.apply(y2indicator, axis=1)
del df['species']
df = shuffle(df)
df.to_csv(file_name, encoding='utf-8')
def y2indicator(x):
species = x['species']
return label_encode[species]
def get_data():
if not os.path.exists(file_name):
preprocess_data(csv_path)
df = pd.read_csv(file_name)
Xdata = df.copy()[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']].to_numpy()
Ydata = df.copy()[['label']].to_numpy()
train_set = int(cutoff * len(df))
Xtrain, Xtest = Xdata[:train_set], Xdata[train_set:]
Ytrain, Ytest = Ydata[:train_set], Ydata[train_set:]
return Xtrain, Xtest, Ytrain, Ytest
def one_hot_encode(Ydata):
N = len(Ydata)
num_classes = 3
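# index rows of the identity matrix to turn integer labels into one-hot vectors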
y = np.eye(num_classes)
return y[Ydata]
|
1zuu/Pytroch-Examples
|
IrishClassifier/util.py
|
util.py
|
py
| 1,061 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "variables.csv_path",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "variables.file_name",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "variables.label_encode",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "variables.file_name",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "variables.csv_path",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "variables.file_name",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "variables.cutoff",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.eye",
"line_number": 34,
"usage_type": "call"
}
] |
32055141770
|
import sqlite3
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
def initiate_db():
cursor.execute('CREATE TABLE IF NOT EXISTS books(name text primary key, author text, year integer, read integer)')
connection.commit()
def add_db(book, author, year):
try:
cursor.execute("INSERT INTO books VALUES(?, ?, ?, 0)", (book, author, year))
connection.commit()
return (f"Added '{book}' {author} {year} to the book store.")
except sqlite3.IntegrityError:
return f"BOOK: '{book}' by {author} already exists in the database."
def retrive_db():
cursor.execute('SELECT * from books')
books_db = [{"BOOK": row[0], "AUTHOR": row[1], "YEAR": str(row[2]), "READ": row[3] } for row in cursor.fetchall()]
if not books_db:
return [{"BOOK": 0, "AUTHOR": 0, "YEAR": 0, "READ": 0}]
return books_db
def _check_book(book_info):
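# return every stored book whose name, author, year or read flag matches book_info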
books_found = [book for book in retrive_db() if book_info in book.values()]
return books_found
def mark_db(book):
if [book_name for book_name in _check_book(book) if book_name["BOOK"] == book]:
cursor.execute("UPDATE books SET read = 1 WHERE name = ?", (book,))
connection.commit()
return f"Marked Book:'{book}' as 'Read' "
return f"Book '{book}'' doesn't exist in Book Store"
def delete_db(book):
if _check_book(book) and _check_book(book)[0]["BOOK"] == book:
cursor.execute("DELETE FROM books WHERE name = ?",(book,))
connection.commit()
return f"'{book}' is removed from database"
return f"Book '{book}' doesn't exist in Book Store"
|
minnalisa/book_shelf
|
database2.py
|
database2.py
|
py
| 1,624 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sqlite3.IntegrityError",
"line_number": 16,
"usage_type": "attribute"
}
] |
9434937105
|
import random
import datetime
import urllib
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.files.storage import default_storage
from django.core.files.base import File
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User # NOQA
from fusionbox.blog.models import Blog
word_list = [
'john', 'intense', 'lucky', 'solid', 'hot', 'clever', 'amusing',
'wicked', 'damp', 'sticky', 'warm', 'courteous', 'young', 'slow',
'selfish', 'great', 'vigorous', 'glamorous', 'clean', 'placid',
'enthusiastic', 'instinctive', 'wild', 'hurt', 'tricky',
'diplomatic', 'sympathetic', 'painstaking', 'raspy', 'proud',
'thoughtful', 'delicious', 'itchy', 'cute', 'debtor', 'trip',
'france', 'cone', 'missile', 'statistic', 'equipment', 'push',
'fine', 'antarctica', 'apparel', 'meteorology', 'tsunami', 'head',
'balance', 'fowl', 'spoon', 'croissant', 'library', 'purchase',
'staircase', 'wasp', 'carnation', 'cannon', 'bronze', 'glass',
'kendo', 'cello', 'taiwan', 'shape', 'cauliflower', 'green',
'run', 'scarf', 'tower', 'regret', 'disgust', 'roof', 'hen',
'law',
]
tags = ['broccoli', 'violin', 'disintermediate', 'infomediaries', '"compelling synergy"']
names = ['John', 'Patrick', 'Alberto', 'Bertha', 'Claudette', 'Arlene', 'Vince']
def random_text(nwords, choices=word_list):
words = []
got_words = 0
while got_words < nwords:
word = random.choice(choices)
if got_words % 10 == 0 and got_words != 0:
word += '.'
if got_words % 50 == 0 and got_words != 0:
words += '\n\n\n'
words.append(word)
got_words += 1
return ' '.join(words)
def random_image(word='unicorn'):
tmpfile, header = urllib.urlretrieve('http://placenoun.com/' + urllib.quote_plus(word))
name = random_text(3)
return default_storage.save(name, File(open(tmpfile), name=name))
class Command(BaseCommand):
    help = "Creates some random blogs"
    option_list = BaseCommand.option_list + (
        make_option('--images',
                    action='store_true',
                    default=False,
                    help='Include some random images'),
    )

    def handle(self, *args, **options):
        # create a throwaway author, then seed 25 published posts with random content
        author = User.objects.create(
            first_name=random_text(1, names),
            last_name=random_text(1, names),
            username=random_text(3).replace(' ', ''),
            email="%s@%s.com" % (random_text(2), random_text(1)),
        )
        for i in range(25):
            body = random_text(500)
            title_first = random_text(1)
            title = title_first + ' ' + random_text(4)
            Blog.objects.create(
                title=title,
                seo_title='Blog ' + title,
                seo_keywords=random_text(5),
                seo_description=body[:40],
                author=author,
                summary=body[:40],
                body=body,
                tags=random_text(2, tags),
                is_published=True,
                # publish/created dates land anywhere in the last 1000 days
                publish_at=datetime.datetime.now() - datetime.timedelta(days=random.randint(1, 1000)),
                created_at=datetime.datetime.now() - datetime.timedelta(days=random.randint(1, 1000)),
                # with --images, roughly a quarter of the posts get a placeholder image
                image=random_image(title_first) if options['images'] and random.randint(0, 3) == 0 else None,
            )
|
fusionbox/django-fusionbox-blog
|
fusionbox/blog/management/commands/seed_blogs.py
|
seed_blogs.py
|
py
| 3,513 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "urllib.urlretrieve",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "urllib.quote_plus",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage.save",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.core.files.base.File",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.core.management.base.BaseCommand.option_list",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "optparse.make_option",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects.create",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "fusionbox.blog.models.Blog.objects.create",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "fusionbox.blog.models.Blog.objects",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "fusionbox.blog.models.Blog",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 92,
"usage_type": "call"
}
] |
15420800470
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import urllib.request
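# crawl Google Images for celebrity face photos and save them under the CycleGAN data folder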
# 132 male celebrities (search keywords for the crawler)
man_list = [
'장근석',
'유아인',
'유동근',
'이서진',
'송일국',
'최재성',
'장혁',
'김민종',
'지창욱',
'주진모',
'안성기',
'이순재',
'신영균',
'이정재',
'공유',
'이영하',
'권상우',
'이승기',
'김우빈',
'최수종',
'강석우',
'차승원',
'이민호',
'차인표',
'소지섭',
'유승호',
'박근형',
'송중기',
'송승헌',
'고수',
'현빈',
'남궁원',
'김수현',
'강신성',
'배용준',
'강동원',
'조인성',
'정우성',
'원빈',
'장동건',
'강다니엘',
'지민',
'백현',
'뷔',
'정국',
'디오',
'찬열',
'진',
'슈가',
'카이',
'수호',
'세훈',
'첸',
'시우민',
'제이홉',
'레이',
'RM ',
'김우석',
'박지훈',
'옹성우',
'닉쿤',
'탑',
'서강준',
'김선호',
'차은우',
'김현중',
'엘',
'박형식',
'임시완',
'김재중',
'최시원',
'정용화',
'황민현',
'동해',
'강인',
'태민',
'남태현',
'우지',
'예성',
'지코',
'정지훈',
'양요섭',
'온유',
'은혁',
'마크',
'루카스',
'태양',
'박해진',
'김범',
'박유천',
'김준수',
'이준기',
'헨리',
'이홍기',
'박보검',
'준호',
'김지석',
'김강우',
'이상엽',
'박서준',
'이선균',
'정일우',
'변요한',
'이준',
'지성',
'최강창민',
'유노윤호',
'이동욱',
'이지훈',
'우도환',
'김래원',
'장기용',
'남주혁',
'박시후',
'주지훈',
'서인국',
'윤계상',
'유연석',
'조승우',
'정해인',
'하석진',
'이제훈',
'규현',
'윤두준',
'키',
'윤시윤',
'신성록',
'안재현',
'옥택연',
'하정우',
'류준열',
'조정석'
]
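# running counter used to number the saved image files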
count = 0
driver = webdriver.Chrome('C:/Users/grand/Desktop/ML/CycleGAN/data_utils/chromedriver.exe')
driver.get('https://www.google.co.kr/imghp?hl=ko&tab=wi&ogbl')
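# for each celebrity: search Google Images, then download up to 25 result images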
for man in man_list:
    try:
        elem = driver.find_element_by_name("q")  # Get Search Bar
        elem.send_keys('{} 얼굴 사진'.format(man))  # Type search words ("<name> face photo")
        elem.send_keys(Keys.RETURN)  # Press Enter
        time.sleep(3)
        try:
            images = driver.find_elements_by_css_selector(".rg_i.Q4LuWd")
        except:
            print('{} Passed'.format(man))
            continue
        if len(images) == 0:
            print('{} No Images'.format(man))
            continue
        time.sleep(1)
        inner_count = 0  # images saved for the current celebrity (capped at 25)
        for image in images:
            try:
                image.click()
                time.sleep(3)
                imgUrl = driver.find_element_by_xpath("/html/body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img").get_attribute('src')
                if imgUrl[:4] != 'http':
                    continue
                else:
                    urllib.request.urlretrieve(imgUrl, 'C:/Users/grand/Desktop/ML/CycleGAN/data/man/image_{}.jpg'.format(str(count)))
                    inner_count += 1
                    count += 1
                if inner_count == 25:
                    break
            except:
                continue
        elem = driver.find_element_by_name("q")
        elem.clear()  # clear the search box for the next name
    except:
        print('{} Passed'.format(man))
        continue
|
myoons/CycleGAN-Gender-Changer
|
data_utils/Korean_Crawling/man_crawling.py
|
man_crawling.py
|
py
| 4,090 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.keys.Keys",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlretrieve",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 181,
"usage_type": "name"
}
] |