seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26606119063
|
#!/usr/bin/env python
# coding: utf-8
# # Import Library
# In[387]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from glob import glob
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score , classification_report
import pandas_profiling
from category_encoders import OneHotEncoder
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_validate
from sklearn.model_selection import KFold  # added: needed for the k_fold cross-validator used below
# In[388]:
t=pd.read_csv("test.csv")
t.info()
# # Import Data
# In[389]:
def wrangel(path):
# read data
df=pd.read_csv(path)
# extract the passenger's title (e.g. Mr, Miss) from the Name column
df["title"]=df["Name"].str.extract(r"([A-Za-z]+)\.",expand=False)
# encode the title as a numeric category
df.loc[df["title"]=="Mr" , "title"] = 0
df.loc[df["title"]=="Miss" , "title"] = 1
df.loc[df["title"]=="Mrs" , "title"] = 2
df.loc[df["title"]=="Master" , "title"] = 3
conditions = (df["title"] == 'Ms') | (df["title"] == 'Col') | (df["title"] == 'Rev') | (df["title"] == 'Dr') | (df["title"] == 'Dona')
df.loc[conditions, "title"] = 4
# fill missing Fare values with the median fare of the passenger's Pclass
df["Fare"].fillna(df.groupby("Pclass")["Fare"].transform("median"),inplace=True)
# fill missing Age values with the median age of the passenger's title group
df["Age"].fillna(df.groupby("title")["Age"].transform("median"),inplace=True)
# fill missing Embarked values with the most common port ("S")
df["Embarked"]=df["Embarked"].fillna("S")
# drop columns that are unused or mostly missing
drop=[]
drop.append("Cabin")
drop.append("Name")
drop.append("Ticket")
drop.append("title")
df.drop(columns=drop,inplace=True)
# encode Sex as a numeric category
df.loc[df["Sex"]=="male" , "Sex"] = 0 # Male ---> 0
df.loc[df["Sex"]=="female" , "Sex"] = 1 # Female ---> 1
# encode Embarked as a numeric category
df.loc[df["Embarked"]=="S" , "Embarked"] = 0 # S ---> 0
df.loc[df["Embarked"]=="C" , "Embarked"] = 1 # C ---> 1
df.loc[df["Embarked"]=="Q" , "Embarked"] = 2 # Q ---> 2
return df
# In[390]:
test = wrangel("test.csv")
df = wrangel("train.csv")
# In[340]:
df.head()
# In[341]:
df.info()
# In[391]:
pandas_profiling.ProfileReport(df)
# In[343]:
df["Embarked"].value_counts()
# In[344]:
test.info()
# In[352]:
test.isnull().sum()
# # Explore Data
# In[353]:
print("Survive :",(df["Survived"]==1).sum())
print("Deceased :",(df["Survived"]==0).sum())
# In[354]:
df.describe()
# In[355]:
# Create the pie chart
values=df["Survived"].value_counts()
label=["Deceased ","Survive "]
plt.pie(values, labels=label,autopct='%1.1f%%')
# Add a title
plt.title('Distribution of Survived')
# Display the chart
plt.show()
# In[356]:
plt.hist(df["Parch"],bins=5, edgecolor='black');
plt.xlabel('Values')
plt.ylabel('Frequency')
plt.title("Values of Parch")
plt.show();
# In[357]:
survive=df[df["Survived"]==1]["SibSp"].value_counts()
death=df[df["Survived"]==0]["SibSp"].value_counts()
dx=pd.DataFrame([survive,death],index=["survive","death"])
dx.plot(kind="bar");
plt.title("Survive of SibSp ");
# In[358]:
survive=df[df["Survived"]==1]["Pclass"].value_counts()
death=df[df["Survived"]==0]["Pclass"].value_counts()
dx=pd.DataFrame([survive,death],index=["survive","death"])
dx.plot(kind="bar");
plt.title("Survive of Pclass ");
# In[359]:
class1=df[df["Pclass"]==1]["Embarked"].value_counts()
class2=df[df["Pclass"]==2]["Embarked"].value_counts()
class3=df[df["Pclass"]==3]["Embarked"].value_counts()
dx=pd.DataFrame([class1,class2,class3],index=["class 1","class 2","class 3"])
dx.plot(kind="bar",stacked=True);
plt.title("Survive of Pclass ");
# We Found that Embarked from S in 1st & 2nd & 3rd Class
# In[360]:
# Create the pie chart
values=df["Sex"].value_counts()
label=["male","female"]
plt.pie(values, labels=label,autopct='%1.1f%%')
# Add a title
plt.title('Distribution of Sex')
# Display the chart
plt.show()
# In[361]:
survive = df[df["Survived"]==1]["Sex"].value_counts()
death = df[df["Survived"]==0]["Sex"].value_counts()
dx = pd.DataFrame([survive,death],index=["survive","death"])
dx=dx.rename(columns={0:"male",1:"female"})
dx.plot(kind="bar")
plt.legend()
plt.title("Survive of Sex");
# In[ ]:
# In[362]:
correlation = df.drop(columns="Survived").corr()
sns.heatmap(correlation)
# # Split Data
# In[ ]:
# In[363]:
df
# In[364]:
target="Survived"
y = df[target]
X = df.drop(columns=target)
x_train , x_test , y_train , y_test = train_test_split(X,y,test_size=0.2,random_state=42)
print("X_train shape:", x_train.shape)
print("y_train shape:", y_train.shape)
print("X_test shape:", x_test.shape)
print("y_test shape:", y_test.shape)
# # Baseline
# In[365]:
y_train_mean = y_train.mean()
print ("Baseline :",round(y_train_mean,2))
# # Logistic Regression
# # Iterate
# In[366]:
log_model = LogisticRegression(max_iter=10000)
# In[367]:
log_model.fit(x_train,y_train)
# #
# # Evaluate
# In[368]:
accuracy=classification_report(y_test,log_model.predict(x_test))
print(accuracy)
# In[369]:
acc_test = accuracy_score(y_test,log_model.predict(x_test))
acc_train= accuracy_score(y_train,log_model.predict(x_train))
print("Accuracy test:",round(acc_test,2))
print("Accuracy train:",round(acc_train,2))
# # KNN Classifier
# In[370]:
knn= KNeighborsClassifier(n_neighbors=13)
knn.fit(x_train,y_train)
# In[371]:
accuracy=classification_report(y_test,knn.predict(x_test))
print(accuracy)
# In[372]:
scoring="accuracy"
score = cross_validate(knn , x_train.drop(columns=["PassengerId"],axis=1),y_train,cv=k_fold, n_jobs=1,scoring=scoring)
print(score['test_score'])
# In[373]:
print("Accuracy :",round(np.mean(score['test_score']),2))
# # Decision Tree
# In[374]:
# Create a decision tree classifier
dec_tree= DecisionTreeClassifier()
# Train the classifier
dec_tree.fit(x_train, y_train)
# In[375]:
accuracy=classification_report(y_test,dec_tree.predict(x_test))
print(accuracy)
# In[376]:
acc_test = accuracy_score(y_test,dec_tree.predict(x_test))
print("Accuracy test:",round(acc_test,2))
# In[377]:
scoring="accuracy"
score = cross_validate(dec_tree , x_train.drop(columns=["PassengerId"],axis=1),y_train,cv=k_fold, n_jobs=1,scoring=scoring)
print("Accuracy :",round(np.mean(score['test_score']),2))
# # Random Forest
# In[378]:
# Create a Random Forest classifier
rf_classifier = RandomForestClassifier()
# Train the classifier
rf_classifier.fit(x_train, y_train)
# In[379]:
# Calculate the accuracy
accuracy = accuracy_score(y_test, rf_classifier.predict(x_test))
print("Accuracy:", round(accuracy,2))
# In[380]:
scoring="accuracy"
score = cross_validate(rf_classifier , x_train.drop(columns=["PassengerId"],axis=1),y_train, n_jobs=1,scoring=scoring)
print("Accuracy :",round(np.mean(score['test_score']),1))
# # Naive Bayes
# In[381]:
nav= GaussianNB()
# Train the classifier
nav.fit(x_train, y_train)
# In[382]:
# Calculate the accuracy
accuracy = accuracy_score(y_test, nav.predict(x_test))
print("Accuracy:", round(accuracy,2))
# In[383]:
scoring="accuracy"
score = cross_validate(nav , x_train.drop(columns=["PassengerId"],axis=1),y_train, n_jobs=1,scoring=scoring)
print("Accuracy :",round(np.mean(score['test_score']),2))
# # Communicate
# The best model is Random Forest, with a cross-validated accuracy of about 0.82
# In[384]:
pred_test=rf_classifier.predict(test)
data = pd.DataFrame({'PassengerId': test["PassengerId"], 'Survived': pred_test})
# In[385]:
data.head()
# In[386]:
data.to_csv(r'D:\projects\gender_submission.csv', index=False)
# In[ ]:
|
tamerelateeq/Titanc
|
titank.py
|
titank.py
|
py
| 8,173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandas_profiling.ProfileReport",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pie",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_validate",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.classification_report",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_validate",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_validate",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.cross_validate",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 429,
"usage_type": "call"
}
] |
36060029870
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class LocalizationNetwork(nn.Module):
def __init__(self, numOfControlPoints=10):
super().__init__()
self.numOfControlPoints = numOfControlPoints
self.pool = nn.MaxPool2d(2, 2)
self.aPool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
self.conv4 = nn.Conv2d(256, 512, 3, padding=1)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(512, 256)
self.fc2 = nn.Linear(256, numOfControlPoints * 2)
self.init_stn()
def forward(self, x):
x = self.bn1(F.relu(self.conv1(x)))
x = self.pool(x)
x = self.bn2(F.relu(self.conv2(x)))
x = self.pool(x)
x = self.bn3(F.relu(self.conv3(x)))
x = self.pool(x)
x = self.bn4(F.relu(self.conv4(x)))
x = self.aPool(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = x.view(-1, 2, self.numOfControlPoints)
return x
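# init_stn : zero the fc2 weights and set its bias so the initial prediction places the
# control points evenly spaced along two horizontal lines (y = 0.1 and y = 0.9)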
def init_stn(self):
interval = np.linspace(0.05, 0.95, self.numOfControlPoints // 2)
controlPoints = [[],[]]
for y in [0.1,0.9]:
for i in range(self.numOfControlPoints // 2):
controlPoints[1].append(y)
for x in interval:
controlPoints[0].append(x)
self.fc2.weight.data.zero_()
self.fc2.bias.data = torch.Tensor(controlPoints).view(-1).float().to(device)
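# Editor's sketch (not in the original file): a quick shape check for the network. The
# batch size and 64x64 input resolution are assumptions; any RGB input that survives
# three 2x2 poolings works, since the adaptive average pool collapses the spatial dims.
if __name__ == "__main__":
    net = LocalizationNetwork(numOfControlPoints=10).to(device)
    out = net(torch.randn(2, 3, 64, 64, device=device))
    print(out.shape)  # expected: torch.Size([2, 2, 10])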
|
xpiste05/knn_projekt
|
models/localizationNetwork.py
|
localizationNetwork.py
|
py
| 1,925 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.AdaptiveAvgPool2d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 68,
"usage_type": "call"
}
] |
32841420589
|
import pandas as pd
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
import re
import jieba
import subprocess
from gensim.test.utils import get_tmpfile, common_texts
from gensim.models import Word2Vec, KeyedVectors
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from matplotlib.font_manager import FontManager
from pylab import mpl
jieba.load_userdict('C:/Users/choose/venv/Lib/site-packages/jieba/dict.blue.txt')
def load_stopwords():
with open('util/stopwords.pkl', 'rb') as f:
stopwords = pkl.load(f)
return stopwords
def load_symbols():
ret = []
with open('util/symbols_20181216.txt', 'r', encoding='utf-8') as f:
rows = f.readlines()
f.close()
for row in rows:
if row[:-1] not in ret:
ret.append(row[:-1])
return ret
def load_pattern():
symbols = load_symbols()
symbols += ['\n', '\r\n', '\r']
symbols_str = ''
for symbol in symbols:
if symbol in '[]()-': symbol = '\\' + symbol
symbols_str += symbol
return re.compile(r'([0-9]+|\.+|[a-zA-Z])|[{}]+'.format(symbols_str))
def to_sentence(document):
ret = list()
rule = re.compile('[\W]+')
result = rule.split(document)
for sentence in result:
if len(sentence) > 0:
ret.append(sentence)
return ret
def tokenize(corpus, stopwords=load_stopwords(), pattern=re.compile(r'[\WA-Za-z0-9]+'), length_constraint=2):
tokenized_corpus = []
for doc in corpus:
tokenized_doc = jieba.lcut(doc)
words = []
for word in tokenized_doc:
if word in stopwords or pattern.match(word): continue
elif len(word) < length_constraint: continue
else: words.append(word)
tokenized_corpus.append(words)
return tokenized_corpus
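# Editor's sketch (not in the original file): tokenizing a tiny corpus. The sample
# sentences are illustrative; the jieba dict, stopword and symbol files referenced above
# must exist for this module to import at all.
if __name__ == '__main__':
    sample_corpus = ['今天天氣很好，我們去公園散步', '機器學習是一個有趣的研究領域']
    for tokens in tokenize(sample_corpus):
        print(tokens)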
|
kartd0094775/IdentifyKOL
|
util/preprocessing.py
|
preprocessing.py
|
py
| 1,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "jieba.load_userdict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "jieba.lcut",
"line_number": 54,
"usage_type": "call"
}
] |
19617294623
|
# _*_ coding: utf-8 _*_
import os
import csv
import time
import json
import logging
import numpy as np
import tensorflow as tf
from sklearn.metrics import auc, roc_curve
# calculate_auc : compute the ROC curve and the AUC
def calculate_auc(labels, predicts):
fpr, tpr, _ = roc_curve(labels, predicts, pos_label=1)
AUC = auc(fpr, tpr)
return fpr, tpr, AUC
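# Editor's example (not in the original): calculate_auc expects binary labels and scores, e.g.
# fpr, tpr, AUC = calculate_auc([0, 1, 1, 0], [0.1, 0.9, 0.8, 0.3])  # AUC == 1.0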
def contrastive_loss(labels, distance):
loss = tf.to_float(tf.reduce_sum(tf.square(distance - labels)))
return loss
def compute_accuracy(prediction, labels, threshold=0.5):
accu = 0.0
for i in range(len(prediction)):  # range instead of the Python 2-only xrange
if labels[i][0] == 1:
if prediction[i][0] > threshold:
accu += 1.0
else:
if prediction[i][0] < threshold:
accu += 1.0
acc = accu / len(prediction)
return acc
# read_and_decode : generate a queue based on filename
def read_and_decode(filename):
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'cfg_1': tf.FixedLenFeature([], tf.string),
'cfg_2': tf.FixedLenFeature([], tf.string),
'dfg_1': tf.FixedLenFeature([], tf.string),
'dfg_2': tf.FixedLenFeature([], tf.string),
'fea_1': tf.FixedLenFeature([], tf.string),
'fea_2': tf.FixedLenFeature([], tf.string),
'num1': tf.FixedLenFeature([], tf.int64),
'num2': tf.FixedLenFeature([], tf.int64),
'max': tf.FixedLenFeature([], tf.int64)})
label = tf.cast(features['label'], tf.int32)
cfg_1 = features['cfg_1']
cfg_2 = features['cfg_2']
dfg_1 = features['dfg_1']
dfg_2 = features['dfg_2']
num1 = tf.cast(features['num1'], tf.int32)
fea_1 = features['fea_1']
num2 = tf.cast(features['num2'], tf.int32)
fea_2 = features['fea_2']
max_num = tf.cast(features['max'], tf.int32)
return label, cfg_1, cfg_2, dfg_1, dfg_2, fea_1, fea_2, num1, num2, max_num
# GoCloneTfHandler : handler that uses TensorFlow to detect code clones in Go programs
class GoCloneTfHandler(object):
def __init__(self, iteration_times=5, embedding_depth=2, embedding_size=64, feature_num=10, mini_batch=10, learning_rate=0.0001, max_iter=1, decay_steps=10, decay_rate=0.0001, snapshot=1, test_num=1000, train_tfrecord="tfrecord/train.tfrecord",test_tfrecord="tfrecord/test.tfrecord",valid_tfrecord="tfrecord/valid.tfrecord", exist_model="", ckpt_file="", test_file="", result_file="",func_info_path=""):
# self.iteration_times = iteration_times # T
self.embedding_depth = embedding_depth # N
self.embedding_size = embedding_size # P
self.feature_num = feature_num # D
self.mini_batch = mini_batch # B
self.learning_rate = learning_rate # lr
self.max_iter = max_iter
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.snapshot = snapshot
self.test_file = test_file
self.result_file = result_file
self.test_num = test_num
self.train_tfrecord = train_tfrecord
self.test_tfrecord = test_tfrecord
self.valid_tfrecord = valid_tfrecord
self.exist_model = exist_model
self.ckpt_file = ckpt_file
self.func_info_path = func_info_path
self.pair_list = []
self.logger = logging.getLogger("default")
self.logger_init()
def load_csv_as_pair(self, pair_label_file):
with open(pair_label_file, "r") as fp:
pair_label = csv.reader(fp)
for line in pair_label:
self.pair_list.append((line[0], line[1]))
# logger_init : initialize logger for console and file
def logger_init(self):
self.logger.setLevel(logging.DEBUG)
log_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(log_format)
self.logger.addHandler(console_handler)
log_file_name = "logs/log%s.txt" % time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
file_handler = logging.FileHandler(log_file_name, mode='w', encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(log_format)
self.logger.addHandler(file_handler)
# structure2vec : one embedding update that combines node features with messages aggregated over the CFG and DFG adjacency matrices
def structure2vec(self, mu_prev, cfg, dfg, x, name="structure2vec"):
with tf.variable_scope(name):
W_1 = tf.get_variable('W_1', [self.feature_num, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
param_cfg = tf.get_variable('param_cfg', 1, tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_CFG_1 = tf.get_variable('P_CFG_1', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_CFG_2 = tf.get_variable('P_CFG_2', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
L_CFG = tf.reshape(tf.matmul(cfg, mu_prev, transpose_a=True), (-1, self.embedding_size))
S_CFG =param_cfg*tf.reshape(tf.matmul(tf.nn.relu(tf.matmul(L_CFG, P_CFG_2)), P_CFG_1), (-1, self.embedding_size))
param_dfg = tf.get_variable('param_dfg', 1, tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_DFG_1 = tf.get_variable('P_DFG_1', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
P_DFG_2 = tf.get_variable('P_DFG_2', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
L_DFG = tf.reshape(tf.matmul(dfg, mu_prev, transpose_a=True), (-1, self.embedding_size))
S_DFG = param_dfg*tf.reshape(tf.matmul(tf.nn.relu(tf.matmul(L_DFG, P_DFG_2)), P_DFG_1), (-1, self.embedding_size))
return tf.tanh(tf.add(tf.add(tf.reshape(tf.matmul(tf.reshape(x, (-1, self.feature_num)), W_1), (-1, self.embedding_size)), S_CFG), S_DFG))
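# structure2vec_net : run five structure2vec updates for each graph in the mini-batch,
# then sum the node embeddings and project them into a single graph embedding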
def structure2vec_net(self, cfgs, dfgs, x, v_num):
with tf.variable_scope("structure2vec_net") as structure2vec_net:
B_mu_5 = tf.Variable(tf.zeros(shape = [0, self.embedding_size]), trainable=False)
w_2 = tf.get_variable('w_2', [self.embedding_size, self.embedding_size], tf.float32, tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
for i in range(self.mini_batch):
cur_size = tf.to_int32(v_num[i][0])
mu_0 = tf.reshape(tf.zeros(shape = [cur_size, self.embedding_size]), (cur_size, self.embedding_size))
cfg = tf.slice(cfgs[i], [0, 0], [cur_size, cur_size])
dfg = tf.slice(dfgs[i], [0, 0], [cur_size, cur_size])
fea = tf.slice(x[i],[0,0], [cur_size, self.feature_num])
mu_1 = self.structure2vec(mu_0, cfg, dfg, fea)
structure2vec_net.reuse_variables()
mu_2 = self.structure2vec(mu_1, cfg, dfg, fea)
mu_3 = self.structure2vec(mu_2, cfg, dfg, fea)
mu_4 = self.structure2vec(mu_3, cfg, dfg, fea)
mu_5 = self.structure2vec(mu_4, cfg, dfg, fea)
B_mu_5 = tf.concat([B_mu_5,tf.matmul(tf.reshape(tf.reduce_sum(mu_5, 0), (1, self.embedding_size)), w_2)],0)
return B_mu_5
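# cal_distance : cosine similarity between the two batches of graph embeddings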
def cal_distance(self, model1, model2):
a_b = tf.reduce_sum(tf.reshape(tf.reduce_prod(tf.concat([tf.reshape(model1,(1,-1)), tf.reshape(model2,(1,-1))],0),0),(self.mini_batch,self.embedding_size)),1,keep_dims=True)
a_norm = tf.sqrt(tf.reduce_sum(tf.square(model1),1,keep_dims=True))
b_norm = tf.sqrt(tf.reduce_sum(tf.square(model2),1,keep_dims=True))
distance = a_b/tf.reshape(tf.reduce_prod(tf.concat([tf.reshape(a_norm,(1,-1)), tf.reshape(b_norm,(1,-1))],0),0),(self.mini_batch,1))
return distance
def get_batch(self, label, cfg_str1, cfg_str2, dfg_str1, dfg_str2, fea_str1, fea_str2, num1, num2, max_num):
y = np.reshape(label, [self.mini_batch, 1])
v_num_1 = []
v_num_2 = []
for i in range(self.mini_batch):
v_num_1.append([int(num1[i])])
v_num_2.append([int(num2[i])])
cfg_1 = []
cfg_2 = []
dfg_1 = []
dfg_2 = []
for i in range(self.mini_batch):
cfg_arr = np.array(cfg_str1[i].split(','))
cfg_adj = np.reshape(cfg_arr, (int(num1[i]), int(num1[i])))
cfg_ori1 = cfg_adj.astype(np.float32)
cfg_ori1.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
cfg_1.append(cfg_ori1.tolist())
cfg_arr = np.array(cfg_str2[i].split(','))
cfg_adj = np.reshape(cfg_arr, (int(num2[i]), int(num2[i])))
cfg_ori2 = cfg_adj.astype(np.float32)
cfg_ori2.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
cfg_2.append(cfg_ori2.tolist())
dfg_arr = np.array(dfg_str1[i].split(','))
dfg_adj = np.reshape(dfg_arr, (int(num1[i]), int(num1[i])))
dfg_ori1 = dfg_adj.astype(np.float32)
dfg_ori1.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
dfg_1.append(dfg_ori1.tolist())
dfg_arr = np.array(dfg_str2[i].split(','))
dfg_adj = np.reshape(dfg_arr, (int(num2[i]), int(num2[i])))
dfg_ori2 = dfg_adj.astype(np.float32)
dfg_ori2.resize(int(max_num[i]), int(max_num[i]), refcheck=False)
dfg_2.append(dfg_ori2.tolist())
fea_1 = []
fea_2 = []
for i in range(self.mini_batch):
fea_arr = np.array(fea_str1[i].split(','))
fea_ori = fea_arr.astype(np.float32)
fea_vec1 = np.resize(fea_ori, (np.max(v_num_1), self.feature_num))
fea_1.append(fea_vec1)
fea_arr = np.array(fea_str2[i].split(','))
fea_ori= fea_arr.astype(np.float32)
fea_vec2 = np.resize(fea_ori, (np.max(v_num_2), self.feature_num))
fea_2.append(fea_vec2)
return y, cfg_1, cfg_2, dfg_1, dfg_2, fea_1, fea_2, v_num_1, v_num_2
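# run : build the siamese graph, restore a trained model if one is provided, score every
# test pair and write the similarities to the result file in descending order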
def run(self):
tf.global_variables_initializer()
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(self.learning_rate, global_step, self.decay_steps, self.decay_rate, staircase=True)
v_num_left = tf.placeholder(tf.float32, shape=[self.mini_batch, 1], name='v_num_left')
cfg_left = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='cfg_left')
dfg_left = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='dfg_left')
fea_left = tf.placeholder(tf.float32, shape=([self.mini_batch, None, self.feature_num]), name='fea_left')
v_num_right = tf.placeholder(tf.float32, shape=[self.mini_batch, 1], name='v_num_right')
cfg_right = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='cfg_right')
dfg_right = tf.placeholder(tf.float32, shape=([self.mini_batch, None, None]), name='dfg_right')
fea_right = tf.placeholder(tf.float32, shape=([self.mini_batch, None, self.feature_num]), name='fea_right')
labels = tf.placeholder(tf.float32, shape=([self.mini_batch, 1]), name='gt')
dropout_f = tf.placeholder("float")
with tf.variable_scope("siamese") as siamese:
model1 = self.structure2vec_net(cfg_left, dfg_left, fea_left, v_num_left)
siamese.reuse_variables()
model2 = self.structure2vec_net(cfg_right, dfg_right, fea_right, v_num_right)
dis = self.cal_distance(model1, model2)
loss = contrastive_loss(labels, dis)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
list_test_label, list_test_cfg_1, list_test_cfg_2, list_test_dfg_1, list_test_dfg_2, list_test_fea_1, \
list_test_fea_2, list_test_num1, list_test_num2, list_test_max = read_and_decode(self.test_tfrecord)
batch_test_label, batch_test_cfg_1, batch_test_cfg_2, batch_test_dfg_1, batch_test_dfg_2, batch_test_fea_1, \
batch_test_fea_2, batch_test_num1, batch_test_num2, batch_test_max \
= tf.train.batch([list_test_label, list_test_cfg_1, list_test_cfg_2, list_test_dfg_1, list_test_dfg_2,
list_test_fea_1, list_test_fea_2, list_test_num1, list_test_num2, list_test_max],
batch_size=self.mini_batch, capacity=10)
init_opt = tf.global_variables_initializer()
saver = tf.train.Saver()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# read json from func_info_path
with open(self.func_info_path, 'r') as f:
func_info_dic = json.load(f)
result_dic = {}
with tf.Session() as sess:
writer = tf.summary.FileWriter('logs/', sess.graph)
# check whether to load exist models
if self.exist_model == "":
sess.run(init_opt)
else:
saver = tf.train.import_meta_graph(self.ckpt_file)
saver.restore(sess, tf.train.latest_checkpoint(self.exist_model))
self.logger.info("Loading models from %s" % self.ckpt_file)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Testing cycle
iter=0
self.load_csv_as_pair(self.test_file)
while iter < self.max_iter:
iter += 1
total_batch = int(self.test_num / self.mini_batch)
if iter % self.snapshot == 0:
total_labels = []
total_predicts = []
avg_loss = 0.
avg_acc = 0.
test_total_batch = int(self.test_num / self.mini_batch)
start_time = time.time()
for m in range(test_total_batch):
test_label, test_cfg_1, test_cfg_2, test_dfg_1, test_dfg_2, \
test_fea_1, test_fea_2, test_num1, test_num2, test_max = sess.run(
[batch_test_label, batch_test_cfg_1, batch_test_cfg_2, batch_test_dfg_1, batch_test_dfg_2, batch_test_fea_1, batch_test_fea_2, batch_test_num1, batch_test_num2, batch_test_max])
y, cfg_1, cfg_2, dfg_1, dfg_2, fea_1, fea_2, v_num_1, v_num_2 \
= self.get_batch(test_label, test_cfg_1, test_cfg_2, test_dfg_1, test_dfg_2,
test_fea_1, test_fea_2, test_num1, test_num2, test_max)
predict = dis.eval(
feed_dict={cfg_left: cfg_1, dfg_left: dfg_1, fea_left: fea_1, v_num_left: v_num_1, cfg_right: cfg_2,
dfg_right: dfg_2, fea_right: fea_2, v_num_right: v_num_2, labels: y, dropout_f: 1.0})
for k, p in enumerate(predict):
(id1, id2) = self.pair_list[y[k][0]]
result_dic[(func_info_dic[id1], func_info_dic[id2])] = p[0]
if m % 20 == 0:
self.logger.info("Testing: %s/%s" % (m, test_total_batch))
coord.request_stop()
coord.join(threads)
result_desc = sorted(result_dic.items(), key=lambda item:-item[1])
with open(self.result_file, "w") as f:
for r in result_desc:
f.write("%s\n%s\n%.4f\n\n" % (r[0][0], r[0][1], r[1]))
|
wangcong15/go-clone
|
Go-CloneF/src/tfrecord2test.py
|
tfrecord2test.py
|
py
| 15,894 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.auc",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tensorflow.to_float",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.square",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.string_input_producer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.TFRecordReader",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tensorflow.parse_single_example",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.int64",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tensorflow.string",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tensorflow.int64",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "tensorflow.int64",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.FixedLenFeature",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "tensorflow.int64",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cast",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cast",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cast",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cast",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "time.strftime",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn.relu",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.tanh",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.add",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_variable",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.truncated_normal_initializer",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "tensorflow.to_int32",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tensorflow.zeros",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tensorflow.slice",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tensorflow.slice",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "tensorflow.slice",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tensorflow.matmul",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_prod",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "tensorflow.sqrt",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorflow.square",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tensorflow.sqrt",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_sum",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tensorflow.square",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tensorflow.reshape",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "tensorflow.reduce_prod",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "tensorflow.concat",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "numpy.resize",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "numpy.resize",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.exponential_decay",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "tensorflow.variable_scope",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.batch",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.Saver",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.import_meta_graph",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.latest_checkpoint",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.Coordinator",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.train.start_queue_runners",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 287,
"usage_type": "call"
}
] |
6575851777
|
from time import sleep
from digitemp.master import UART_Adapter
from digitemp.device import DS18B20
import random
import asyncio
import logging
class TemperatureMonitor:
def __init__(self):
self.bus = UART_Adapter('/dev/ttyUSB0')
self.sensor = DS18B20(self.bus)
self.listeners = []
def register_listener(self, listener):
# A listener has a on_notify(new_temp) async function
self.listeners.append(listener)
def unregister_listener(self, listener):
self.listeners.remove(listener)
async def notify_listeners(self):
cur_temp = self.sensor.get_temperature()
# f = open('afile', 'r')
# cur_temp = int(f.readline().strip())
# f.close()
self.cur_temp = cur_temp
logging.info(f'Temperature is: {cur_temp}')
await asyncio.gather(
*(l.on_notify(cur_temp) for l in self.listeners)
)
async def monitor(self):
while True:
await asyncio.gather(
self.notify_listeners(),
asyncio.sleep(10)
)
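# Editor's sketch (not in the original file): a minimal listener and entry point. It
# requires the DS18B20 sensor on /dev/ttyUSB0; LogListener is a hypothetical example.
class LogListener:
    async def on_notify(self, new_temp):
        logging.info(f'listener received temperature: {new_temp}')

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    monitor = TemperatureMonitor()
    monitor.register_listener(LogListener())
    asyncio.run(monitor.monitor())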
|
SchrodingersCat00/vuurwachter
|
src/temp_monitor.py
|
temp_monitor.py
|
py
| 1,091 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "digitemp.master.UART_Adapter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "digitemp.device.DS18B20",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "asyncio.gather",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "asyncio.gather",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 37,
"usage_type": "call"
}
] |
18164640711
|
import pickle
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Download NLTK data (you only need to do this once)
nltk.download('stopwords')
nltk.download('wordnet')
# Load the trained model and vectorizer
with open('check_spam_classifier.pkl', 'rb') as clf_file:
clf = pickle.load(clf_file)
with open('check_spam_vectorizer.pkl', 'rb') as vectorizer_file:
vectorizer = pickle.load(vectorizer_file)
# Load labels from the text file
with open('labels.txt', 'r') as labels_file:
labels = labels_file.read().splitlines()
# Define stopwords and lemmatizer
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
def preprocess_input(text):
# Preprocess the input text in the same way as the training data
text = text.lower()
text = ' '.join([word for word in text.split() if word not in stop_words])
text = ' '.join([lemmatizer.lemmatize(word) for word in text.split()])
return text
def is_scam(input_text):
# Preprocess the input text
input_text = preprocess_input(input_text)
# Vectorize the preprocessed text
input_text_tfidf = vectorizer.transform([input_text])
# Make a prediction
prediction = clf.predict(input_text_tfidf)
# Get the label using the labels list
predicted_label = labels[prediction[0]]
return predicted_label
if __name__ == "__main__":
user_input = input("Enter text to check if it's a scam: ")
result = is_scam(user_input)
print(f"Predicted label: {result}")
|
GOVINDFROMINDIA/Twitter-Scam-Victims
|
dsg.py
|
dsg.py
|
py
| 1,596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nltk.download",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 23,
"usage_type": "call"
}
] |
39729133373
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0010_auto_20151113_1608'),
]
operations = [
migrations.AddField(
model_name='review',
name='author',
field=models.CharField(default=b'Anonymous', max_length=30),
),
]
|
midnitehighways/shop
|
store/migrations/0011_review_author.py
|
0011_review_author.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.migrations",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
}
] |
36867613594
|
from datetime import datetime
from sqlalchemy import Column, TIMESTAMP
class TimestampsMixin:
__abstract__ = True
__created_at_name__ = 'created_at'
__updated_at_name__ = 'updated_at'
# pass the callable itself (no parentheses) so every INSERT/UPDATE gets a fresh timestamp
__datetime_func__ = datetime.now
created_at = Column(
__created_at_name__,
TIMESTAMP(timezone=False),
default=__datetime_func__,
nullable=False
)
updated_at = Column(
__updated_at_name__,
TIMESTAMP(timezone=False),
default=__datetime_func__,
onupdate=__datetime_func__,
nullable=False
)
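# Editor's sketch (not in the original file): how a model would pick up the mixin.
# Base, Item and the "items" table are illustrative assumptions; requires SQLAlchemy 1.4+.
if __name__ == "__main__":
    from sqlalchemy import Integer
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class Item(TimestampsMixin, Base):
        __tablename__ = "items"
        id = Column(Integer, primary_key=True)

    print(Item.__table__.c.keys())  # includes 'created_at', 'updated_at' and 'id'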
|
siarie/fastapi-start
|
app/db/mixins.py
|
mixins.py
|
py
| 581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.TIMESTAMP",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.TIMESTAMP",
"line_number": 20,
"usage_type": "call"
}
] |
14555529648
|
from time import sleep
import btc
import click
from core import BitcoinTwitterProfile
import schedule
@click.group()
def bitc0in_twitter():
"""
Syncs your twitter profile with bitcoin's volatility.
"""
@bitc0in_twitter.command()
def run():
"""Start Program"""
bitcoin_percent_change = btc.get_percent_change()
profile = BitcoinTwitterProfile(bitcoin_percent_change=bitcoin_percent_change)
def job():
bitcoin_percent_change = btc.get_percent_change()
state = profile.get_market_state(bitcoin_percent_change)
if state == "bearish":
profile.dumping()
else:
profile.pumping()
schedule.every(10).minutes.do(job)
while True:
schedule.run_pending()
sleep(1)
# print(".", end="", flush=True)
@bitc0in_twitter.command()
def test():
"""Tests everything is setup correctly."""
click.echo("TESTING!!!")
bms = BitcoinTwitterProfile(bitcoin_percent_change=5)
bms.dumping()
click.echo("check the for bearish profile")
click.echo(f"state: {bms.state}")
click.echo("Sleeping for 15 seconds.")
sleep(15)
bms.pumping()
click.echo("check the for bullish profile")
click.echo(f"state: {bms.state}")
sleep(15)
bms.dumping()
if __name__ == "__main__":
bitc0in_twitter()
|
dgnsrekt/bitc0in-twitter
|
bitc0in_twitter/cli.py
|
cli.py
|
py
| 1,335 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "click.group",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "btc.get_percent_change",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "core.BitcoinTwitterProfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "btc.get_percent_change",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "schedule.every",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "schedule.run_pending",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "core.BitcoinTwitterProfile",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 55,
"usage_type": "call"
}
] |
6401924379
|
# version: python 3.7
# zID: z5052292
from socket import *
from datetime import datetime
import time
import sys
serverIP = sys.argv[1]
serverPort = int(sys.argv[2])
clientSocket = socket(AF_INET, SOCK_DGRAM)
list_rtts = []
packets_lost = 0
for i in range(10):
time_stamp = datetime.now().isoformat(sep=' ')[:-3]
ping_message = "PING" + str(i) + ' ' + time_stamp + '\r\n'
time_send = datetime.now()
clientSocket.sendto(ping_message.encode(), (serverIP, serverPort))
try:
clientSocket.settimeout(1)
response, serverAddress = clientSocket.recvfrom(2048)
time_receive = datetime.now()
rtt = round((time_receive - time_send).total_seconds() * 1000)
list_rtts.append(rtt)
print(f'Ping to {serverIP}, seq = {i}, rtt = {rtt} ms')
clientSocket.settimeout(None)
except timeout:
packets_lost += 1
print(f'Ping to {serverIP}, seq = {i}, rtt = time out')
print("\n")
print(f'Minimum RTT = {min(list_rtts)} ms')
print(f'Maximum RTT = {max(list_rtts)} ms')
print(f'Average RTT = {round(float(sum(list_rtts)/len(list_rtts)))} ms')
print(f'10 packets transmitted, {10 - int(packets_lost)} packets received, {float(packets_lost) / 10 * 100}% packet loss.')
clientSocket.close()
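# --- Local test helper (not part of the original lab submission) ---
# A minimal UDP echo server sketch, assuming you just need something on localhost for
# the client above to ping; the actual assignment server may behave differently
# (e.g. simulate packet loss). Save it as a separate script and run it with a port,
# e.g. `python echo_server.py 5000`, then run the client against 127.0.0.1 5000.
from socket import socket, AF_INET, SOCK_DGRAM
import sys

serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('', int(sys.argv[1])))
while True:
    message, clientAddress = serverSocket.recvfrom(2048)
    serverSocket.sendto(message, clientAddress)  # echo the PING message back unchanged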
|
YuanG1944/COMP9331-Computer-Networks-and-Applications
|
Lab2/PingClient_zhou.py
|
PingClient_zhou.py
|
py
| 1,235 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 25,
"usage_type": "name"
}
] |
10426011052
|
"""Conceptual model page."""
from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
class CMPage(Page):
template = "ecos_cm/cm_page.html"
ECOLOGICAL_PROCESSES = "ecological processes"
TARGET_SPECIES = "target species"
CONCEPTUAL_MODEL_TYPE_CHOICES = [
(ECOLOGICAL_PROCESSES,'ecological processes'),
(TARGET_SPECIES, 'target species'),
]
conceptual_model_type = models.CharField(
choices=CONCEPTUAL_MODEL_TYPE_CHOICES,
max_length=100,
default=ECOLOGICAL_PROCESSES,
)
cm_title = models.CharField(max_length=300, null=True, blank=True)
cm_image = models.ForeignKey(
"wagtailimages.Image",
null=True,
blank=False,
on_delete=models.SET_NULL,
related_name="+"
)
cm_human_interactions = RichTextField( null=True, blank=True)
cm_ecolagical_processes = RichTextField( null=True, blank=True)
cm_oceanographic_variables = RichTextField( null=True, blank=True)
cm_performance_indicators = RichTextField( null=True, blank=True)
content_panels = Page.content_panels + [
FieldPanel("conceptual_model_type"),
FieldPanel("cm_title"),
ImageChooserPanel("cm_image"),
FieldPanel("cm_human_interactions"),
FieldPanel("cm_ecolagical_processes"),
FieldPanel("cm_oceanographic_variables"),
FieldPanel("cm_performance_indicators"),
]
|
CNR-ISMAR/ecoads
|
ecos_cm/models.py
|
models.py
|
py
| 1,600 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "wagtail.core.models.Page",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.SET_NULL",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "wagtail.core.models.Page.content_panels",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "wagtail.core.models.Page",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "wagtail.images.edit_handlers.ImageChooserPanel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "wagtail.admin.edit_handlers.FieldPanel",
"line_number": 53,
"usage_type": "call"
}
] |
34600680585
|
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(Z):
"""sigmoid
Arguments:
Z {[np.array]} -- [Wx + b]
Returns:
A - [np.array] -- [1 / (1 + exp(-Z))]
cache - Z
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""rectified linear unit
Arguments:
Z {[np.array]} -- [Wx + b]
Returns:
A - [np.array] -- [max(0,Z)]
cache - Z
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA - the post-activation gradient
cache - Z
Returns:
dZ - Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# for z <= 0, set dz to 0
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA - the post-activation gradient
cache - Z
Returns:
dZ - Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def print_mislabeled_images(classes, X, y, p):
"""
Plots images where predictions and truth were different.
X -- dataset
y -- true labels
p -- predictions
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
print(mislabeled_indices)
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
plt.subplot(2, num_images, i + 1)
plt.imshow(X[:,index].reshape(64,64,3))
plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
plt.show()
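# --- Quick numerical sanity check (illustrative addition, not in the original file) ---
# Compares the analytic sigmoid gradient from sigmoid_backward against a central
# finite difference; with dA = 1 the two should agree very closely.
if __name__ == "__main__":
    eps = 1e-5
    Z = np.array([[-2.0, -0.5, 0.0, 0.5, 2.0]])
    dA = np.ones_like(Z)
    analytic = sigmoid_backward(dA, Z)                                  # dA * s * (1 - s)
    numeric = (sigmoid(Z + eps)[0] - sigmoid(Z - eps)[0]) / (2 * eps)   # finite difference of sigmoid(Z)
    print("max abs difference:", np.max(np.abs(analytic - numeric)))    # expected: a very small number (~1e-10 or less)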
|
anantsrivastava30/deeplearning
|
dnn_utils.py
|
dnn_utils.py
|
py
| 1,924 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.exp",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
}
] |
38030500642
|
import numpy as np
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
"""
Reads a Siemens raw data file and returns the DICO values
Author: Ali Aghaeifar <[email protected]>
"""
def read_dico(twixObj):
mdb_vop = [mdb for mdb in twixObj[-1]['mdb'] if mdb.is_flag_set('MDH_VOP')]
# concatenate segments of RFs longer than 1ms
DICO_comb = []
for mdb in tqdm(mdb_vop, desc='Reading DICO'):
if mdb.mdh.Counter.Ide == 0:
DICO_comb.append(mdb.data)
else:
DICO_comb[-1] = np.concatenate((DICO_comb[-1],mdb.data), axis=1)
DICO = []
shapes = [dico.shape for dico in DICO_comb] # all shapes
shapes = sorted(set(shapes), key=shapes.index) # unique shapes
for i, shape in enumerate(shapes):
temp = [dico for dico in tqdm(DICO_comb, desc=f'RF Pulse {i}') if dico.shape == shape]
DICO.append(np.stack(temp, axis=-1))
forward = [dico_frw[::2] for dico_frw in DICO]
reflect = [dico_rfl[1::2] for dico_rfl in DICO]
return forward, reflect
# memory optimized version, but slower. Only save integral of forward signal
def read_dico_memOpt(twixObj):
mdb_vop = [mdb for mdb in twixObj[-1]['mdb'] if mdb.is_flag_set('MDH_VOP')]
forward_integral = []
forward_length = []
for mdb in tqdm(mdb_vop, desc = 'Reading DICO'):
DICO_integral = np.sum(np.abs(mdb.data[::2]), axis=1)
DICO_length = mdb.data.shape[1]
if mdb.mdh.Counter.Ide == 0:
forward_integral.append(DICO_integral)
forward_length.append(DICO_length)
else:
forward_integral[-1] = forward_integral[-1] + DICO_integral
forward_length[-1] = forward_length[-1] + DICO_length
forward_integral = np.stack(forward_integral, axis=-1)
# split RFs with different lengths
forward_length_unq = sorted(set(forward_length), key=forward_length.index)
forward_integral = [forward_integral[:, np.where(np.array(forward_length) == l)[0]] for l in forward_length_unq]
return forward_integral, forward_length_unq
def plot_drift(twixObj):
forward_integral, _ = read_dico_memOpt(twixObj)
for dico in forward_integral:
_, ax = plt.subplots()
ax.plot(dico.squeeze().T)  # plot each RF pulse's integral rather than only the first
plt.show()
|
aghaeifar-publications/RFPA_drift
|
dico_tools.py
|
dico_tools.py
|
py
| 2,283 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tqdm.auto.tqdm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
}
] |
29282262756
|
# -*- coding: utf-8 -*-
import ispformat.schema as _schema
from jsonschema import Draft4Validator, RefResolver, draft4_format_checker
from jsonschema.exceptions import RefResolutionError, ValidationError
from urlparse import urlsplit
class MyRefResolver(RefResolver):
def resolve_remote(self, uri):
# Prevent remote resolving
raise RefResolutionError("LOL NOPE")
geojson_allowed_types=('Polygon', 'MultiPolygon')
def validate_geojson_type(d):
"""
Make sure a geojson dict only contains allowed geometry types
"""
type_=d.get('type')
if type_ not in geojson_allowed_types:
return False
return True
def validate_geojson(geodict):
"""
Convenience function to validate a geojson dict
"""
_version = 0.1
schema = _schema.load_schema(_version, 'geojson/geojson')
v = Draft4Validator(
schema,
resolver=MyRefResolver.from_schema(schema, store=_schema.deps_for_version(_version)),
format_checker=draft4_format_checker,
)
for err in v.iter_errors(geodict):
return False
if not validate_geojson_type(geodict):
return False
return True
def validate_isp(jdict):
"""
Validate a json-object against the isp json-schema
"""
if not 'version' in jdict:
raise ValidationError(u'version is a required property')
try:
schema=_schema.versions[jdict['version']]
except (AttributeError, TypeError, KeyError):
raise ValidationError(u'version %r unsupported'%jdict['version'])
v=Draft4Validator(
schema,
resolver=MyRefResolver.from_schema(schema, store=_schema.deps_for_version(jdict['version'])),
format_checker=draft4_format_checker,
)
for err in v.iter_errors(jdict):
yield err
def is_valid_url(u):
try:
pu=urlsplit(u)
except:
return False
if pu.scheme not in ('', 'http', 'https'):
return False
if not pu.netloc:
return False
return True
if 'website' in jdict and not is_valid_url(jdict['website']):
yield ValidationError(u'%r must be an absolute HTTP URL'%u'website',
instance=jdict[u'website'], schema=schema[u'properties'][u'website'],
path=[u'website'], schema_path=[u'properties', u'website', u'description'],
validator=u'validate_url', validator_value=jdict['website'])
if 'logoURL' in jdict and not is_valid_url(jdict['logoURL']):
yield ValidationError(u'%r must be an absolute HTTP URL'%u'logoURL',
instance=jdict[u'logoURL'], schema=schema[u'properties'][u'logoURL'],
path=[u'logoURL'], schema_path=[u'properties', u'logoURL', u'description'],
validator=u'validate_url', validator_value=jdict['logoURL'])
sch=schema[u'properties'][u'otherWebsites'][u'patternProperties'][u'^.+$']
for name, url in jdict.get('otherWebsites', {}).iteritems():
if is_valid_url(url):
continue
yield ValidationError(u'%r must be an absolute HTTP URL'%name,
instance=url, schema=sch, path=[u'otherWebsite', name],
schema_path=[u'properties', u'otherWebsites', u'patternProperties', u'^.+$', 'description'],
validator=u'validate_url', validator_value=url)
for i, ca in enumerate(jdict.get('coveredAreas', [])):
area=ca.get('area')
if area and validate_geojson_type(area):
continue
elif not area:
continue
yield ValidationError(
u'GeoJSON can only contain the following types: %s'%repr(geojson_allowed_types),
instance=ca, schema=schema[u'definitions'][u'coveredArea'][u'properties'][u'area'],
path=['coveredAreas', i, 'area'],
schema_path=[u'properties', u'coveredAreas', u'items', u'properties', u'area'],
validator=u'validate_geojson_type', validator_value=ca
)
|
Psycojoker/isp-format
|
ispformat/validator/schemavalidator.py
|
schemavalidator.py
|
py
| 4,121 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "jsonschema.RefResolver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "jsonschema.exceptions.RefResolutionError",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ispformat.schema.load_schema",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ispformat.schema",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "jsonschema.Draft4Validator",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "ispformat.schema.deps_for_version",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ispformat.schema",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "jsonschema.draft4_format_checker",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "jsonschema.exceptions.ValidationError",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "ispformat.schema.versions",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "ispformat.schema",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "jsonschema.exceptions.ValidationError",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "jsonschema.Draft4Validator",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "ispformat.schema.deps_for_version",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "ispformat.schema",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "jsonschema.draft4_format_checker",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "urlparse.urlsplit",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "jsonschema.exceptions.ValidationError",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "jsonschema.exceptions.ValidationError",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "jsonschema.exceptions.ValidationError",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "jsonschema.exceptions.ValidationError",
"line_number": 106,
"usage_type": "call"
}
] |
41237359815
|
import os, sys
import multiprocessing as mp
def pipeline(config, genome, protocol, cells, minreads, name, fq1, fq2, dir,
top_million_reads, step, parallel):
"""
Run the Data Processing Pipeline...
#. Count the barcodes from the paired-end read 1 sequences;
#. Read the barcode counts files;
#. Correct the barcode with 1bp mismatch;
#. Tally the mismatched barcode reads and sequences;
#. Determine whether the last base is mutated (A/T/C/G appear at similar ratios at the last base);
#. Filter by whitelist;
#. Filter by read counts (>=min_reads);
#. Print the number of barcodes and reads retained after each step.
Usage:
::
from baseqDrops import pipeline
pipeline("", "hg38", "10X", 1000, minreads, name, fq1, fq2, dir, top_million_reads, step, parallel)
#specify the length of UMI and barcodes
pipeline("", "hg38", "10X", 1000, minreads, name, fq1, fq2, dir, top_million_reads, step, parallel)
#Run in command line
baseqdrops
Protocols:
#. For 10X: 16bp Barcode and 10 bp UMI => 10X (most commonly used)
#. For 10X: 14bp Barcode and 5/10 bp UMIs => 10X_14_10 / 10X_14_5 (For some old version data)
#. For DropSeq ==> dropseq
#. For inDrop ==> indrop
Args:
config: The path of configuration file;
genome: Genome version (hg38, mm10, hg38_mm10);
cells: Max number of cells;
minreads: Minimum number of reads for a cell barcode (10000);
name: Samplename;
fq1, fq2: Path to fastq reads;
dir: The folder for processing, a folder with samplename will be created;
top_million_reads: Number of reads (in millions) used for processing;
step: Steps to run;
parallel: How many threads to use;
Steps:
count
stats
split
star
tagging
table
"""
from . import count_barcode
from . import valid_barcode
from . import split_by_barcode
from .barcode.split_fast import split_by_barcode_faster
from .tagging.prime3 import check_reference_files
#Set Config Files...
if config:
if not os.path.exists(config):
sys.exit("[error] The config file does not exist in: {}!".format(config))
os.environ["BASEQCFG"] = config
#Check Annotation Files
print('[info] Checking Reference Files...')
check_reference_files(genome)
print('[info] Start Processing Your RNA-Seq Dataset ...')
dir = os.path.abspath(os.path.join(dir, name))
bc_counts = os.path.join(dir, "barcode_count_{}.csv".format(name))
bc_stats = os.path.join(dir, "barcode_stats_{}.csv".format(name))
bc_splits_dir = os.path.join(dir, "barcode_splits")
align_dir = os.path.join(dir, "star_align")
tagging_dir = os.path.join(dir, "read_tagging")
tpm_table = os.path.join(dir, "Result.UMIs.{}.txt".format(name))
reads_table = os.path.join(dir, "Result.Reads.{}.txt".format(name))
from itertools import product
barcode_prefix = [x[0] + x[1] for x in list(product('ATCG', repeat=2))]
dirs = [dir, align_dir, tagging_dir, bc_splits_dir]
for dir in dirs:
if not os.path.exists(dir):
os.mkdir(dir)
#Check the existence of the files
if not os.path.exists(fq1):
sys.exit("[error] Fastq-1 does not exist!")
if not os.path.exists(fq2):
sys.exit("[error] Fastq-2 does not exist!")
#count barcode
if step in ["all", "count"]:
print("[info] Counting the barcodes ...")
count_barcode(fq1, bc_counts, protocol, min_reads=50, topreads=int(top_million_reads))
#aggregate
if step in ["all", "stats"]:
print("[info] Aggregating the barcodes errors and get valid ones ...")
valid_barcode(protocol, bc_counts, max_cell=cells, min_reads=minreads, output=bc_stats)
#barcode split
if step in ["all", "split"]:
print("[info] Split the barcode for each Barcode Prefix ...")
split_by_barcode_faster(name, protocol, bc_stats, fq1, fq2,
bc_splits_dir, int(top_million_reads))
#run alignment
if step in ["all", "star"]:
from .star import star_align
star_align(bc_splits_dir, align_dir, name, genome, parallel=int(parallel))
#run reads tagging
if step in ["all", "tagging"]:
from .tagging.prime3 import tagging_reads
print('[info] Tagging the reads to genes...')
pool = mp.Pool(processes=int(parallel))
result = []
for bc in barcode_prefix:
bamfile = os.path.join(align_dir, "{}/{}.sort.bam".format(bc, bc))
outfile = os.path.join(tagging_dir, "tagging.{}.txt".format(bc))
#tagging_reads(genome, bamfile, outfile)
result.append(pool.apply_async(tagging_reads, (genome, bamfile, outfile,)))
pool.close()
pool.join()
#run Table aggregation
if step in ["all", "table"]:
print('[info] Build gene expression table from the tagging files...')
from .aggregate import read_barcode_gene_file, write_to_table
pool = mp.Pool(processes=int(parallel))
result = []
for bc in barcode_prefix:
filepath = os.path.join(tagging_dir, "tagging.{}.txt".format(bc))
result.append(pool.apply_async(read_barcode_gene_file, (filepath, 1)))
pool.close()
pool.join()
from itertools import chain
barcodes_all = [x.get()[0] for x in result]
barcodes_lists = list(chain(*barcodes_all))
exp = {}
UMIs_all = [x.get()[1] for x in result]
for UMI in UMIs_all:
for gene in UMI:
if gene in exp:
exp[gene].update(UMI[gene])
else:
exp[gene] = UMI[gene]
write_to_table(barcodes_lists, exp, tpm_table, "UMI Table")
exp = {}
Reads_all = [x.get()[2] for x in result]
for UMI in Reads_all:
for gene in UMI:
if gene in exp:
exp[gene].update(UMI[gene])
else:
exp[gene] = UMI[gene]
write_to_table(barcodes_lists, exp, reads_table, "Read Count Table")
if __name__ == "__main__":
print("begin running pipeline")
|
beiseq/baseqDrops
|
package/baseqDrops/pipeline.py
|
pipeline.py
|
py
| 6,329 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "tagging.prime3.check_reference_files",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "itertools.product",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "barcode.split_fast.split_by_barcode_faster",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "star.star_align",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tagging.prime3.tagging_reads",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "aggregate.read_barcode_gene_file",
"line_number": 144,
"usage_type": "argument"
},
{
"api_name": "itertools.chain",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "aggregate.write_to_table",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "aggregate.write_to_table",
"line_number": 170,
"usage_type": "call"
}
] |
31036723867
|
'''
Source code modified from https://github.com/budzianowski/PyTorch-Beam-Search-Decoding/blob/master/decode_beam.py
implementation of beam search on GPT-2's logits
'''
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
from queue import PriorityQueue
import sys
class BeamSearchNode(object):
def __init__(self, hiddenstate, previousNode, wordId, logProb, length):
'''
:param hiddenstate:
:param previousNode:
:param wordId:
:param logProb:
:param length:
'''
self.h = hiddenstate
self.prevNode = previousNode
self.wordid = wordId
self.logp = logProb
self.leng = length
def eval(self, alpha=1.0):
reward = 0
# Add here a function for shaping a reward
return self.logp / float(self.leng - 1 + 1e-6) + alpha * reward
def __lt__(self, x):
if(self.eval() < x.eval()):
return True
else:
return False
def beam_decode_sentence(hidden_X, config,num_generate=1, beam_size = 5, batch_size = 1):
'''
generate a sentence based on beam search
:param hidden_X: hidden_X of sentence embedding (1024) with/without projection
:param model: GPT-2 model
:param tokenizer: GPT-2 tokenizer
:return: decoded_batch
'''
#SOS_token = tokenizer.encode("<|endoftext|>")
beam_width = beam_size
topk = num_generate # how many sentence do you want to generate
past = None
model = config['model']
tokenizer =config['tokenizer']
eos = [tokenizer.encode(tokenizer.eos_token)]
EOS_token = eos
hidden_X_unsqueeze = torch.unsqueeze(hidden_X, 0)
hidden_X_unsqueeze = torch.unsqueeze(hidden_X_unsqueeze, 0) #[1,1,embed_dim]
decoded_batch = []
# decoding goes sentence by sentence
for idx in range(batch_size):
# Number of sentence to generate
endnodes = []
number_required = min((topk + 1), topk - len(endnodes))
# starting node - hidden vector, previous node, word id, logp, length hiddenstate, previousNode, wordId, logProb, length
node = BeamSearchNode(past, None, torch.tensor([[220]]).cuda(), 0, 1) # 220 refers to single space ' ' on GPT
nodes = PriorityQueue()
# start the queue
nodes.put((-node.eval(), node))
qsize = 1
# start beam search
for text_len in range(50):
# give up when decoding takes too long
if qsize > 2000: break
# fetch the best node
try:
score, n = nodes.get()
except:
print('Cannot get nodes')
while not nodes.empty():
next_item = nodes.get()
print(next_item)
prev_input = n.wordid
past = n.h
if n.wordid.item() == EOS_token[0] and n.prevNode != None:
endnodes.append((score, n))
# if we reached maximum # of sentences required
if len(endnodes) >= number_required:
break
else:
print('continue')
continue
# decode for one step using decoder
if(text_len == 0):
logits, past = model(inputs_embeds=hidden_X_unsqueeze,past_key_values = past,return_dict=False)
else:
logits, past = model(prev_input,past_key_values = past, attention_mask=None, return_dict=False)
logits = logits[:, -1, :]
probs = torch.softmax(logits, dim=-1)
# PUT HERE REAL BEAM SEARCH OF TOP
log_prob, indexes = torch.topk(probs, beam_width)
nextnodes = []
for new_k in range(beam_width):
decoded_t = indexes[0][new_k].view(1, -1)
log_p = log_prob[0][new_k].item()
#### hiddenstate, previousNode, wordId, logProb, length
node = BeamSearchNode(past, n, decoded_t, n.logp + log_p, n.leng + 1)
score = -node.eval()
nextnodes.append((score, node))
# put them into queue
for i in range(len(nextnodes)):
score, nn = nextnodes[i]
try:
nodes.put((score, nn))
except:
print('Cannot put nodes')
print(score)
print(nn)
# increase qsize
qsize += len(nextnodes) - 1
# for loop ends here
# choose nbest paths, back trace them
if len(endnodes) == 0:
endnodes = [nodes.get() for _ in range(topk)]
utterances = []
text = []
for score, n in sorted(endnodes, key=operator.itemgetter(0)):
utterance = []
utterance.append(n.wordid.item())
# back trace
while n.prevNode != None:
n = n.prevNode
utterance.append(n.wordid.item())
utterance = utterance[::-1]
utterances.append(utterance)
decode_process = tokenizer.decode(utterance[1:-1])
text.append(decode_process)
decoded_batch.append(utterances)
return text
def greedy_decode(decoder_hidden, encoder_outputs, target_tensor):
'''
:param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence
:param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding
:param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence
:return: decoded_batch
'''
batch_size, seq_len = target_tensor.size()
decoded_batch = torch.zeros((batch_size, MAX_LENGTH))
decoder_input = torch.LongTensor([[SOS_token] for _ in range(batch_size)], device=device)
for t in range(MAX_LENGTH):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.data.topk(1) # get candidates
topi = topi.view(-1)
decoded_batch[:, t] = topi
decoder_input = topi.detach().view(-1, 1)
return decoded_batch
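# --- Illustrative ordering check (not from the original repository) ---
# Shows why BeamSearchNode defines __lt__: nodes are queued as (-eval(), node), so the
# node with the higher length-normalised log-probability is popped first, and __lt__
# breaks ties when two scores are identical. torch is only needed for the dummy
# word-id tensors.
if __name__ == "__main__":
    q = PriorityQueue()
    better = BeamSearchNode(None, None, torch.tensor([[1]]), logProb=-1.0, length=3)
    worse = BeamSearchNode(None, None, torch.tensor([[2]]), logProb=-2.0, length=3)
    q.put((-worse.eval(), worse))
    q.put((-better.eval(), better))
    score, first = q.get()
    print(first.wordid.item(), round(score, 3))  # expected: 1 0.5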
|
HKUST-KnowComp/GEIA
|
decode_beam_search.py
|
decode_beam_search.py
|
py
| 6,377 |
python
|
en
|
code
| 22 |
github-code
|
6
|
[
{
"api_name": "torch.unsqueeze",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.unsqueeze",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "queue.PriorityQueue",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.softmax",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.topk",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.nn",
"line_number": 133,
"usage_type": "argument"
},
{
"api_name": "operator.itemgetter",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line_number": 170,
"usage_type": "call"
}
] |
50277551
|
from typing import *
# ref https://leetcode.cn/problems/naming-a-company/solution/by-endlesscheng-ruz8/
from collections import defaultdict
class Solution:
def distinctNames(self, ideas: List[str]) -> int:
group = defaultdict(int)
for s in ideas:
group[s[1:]] |= 1 << (ord(s[0]) - ord('a'))
ans = 0
# Let cnt[i][j] be the number of groups whose set of first letters does not contain i but does contain j.
# Build cnt while enumerating the groups; for each group, also enumerate first letters i in the group and j not in the group, adding cnt[i][j] to the answer.
cnt = [[0] * 26 for _ in range(26)]
for mask in group.values():
for i in range(26):
if mask >> i & 1 == 0:
for j in range(26):
if mask >> j & 1:
cnt[i][j] += 1
else:
for j in range(26):
if mask >> j & 1 == 0:
ans += cnt[i][j]
return ans * 2
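# --- Illustrative usage (not part of the original solution file) ---
# LeetCode 2306, example 1: the valid unordered pairs are (coffee, donuts),
# (donuts, time) and (donuts, toffee); each pair yields two company names,
# so the expected answer is 6.
if __name__ == "__main__":
    print(Solution().distinctNames(["coffee", "donuts", "time", "toffee"]))  # expected: 6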
|
code-cp/leetcode
|
solutions/6094/main.py
|
main.py
|
py
| 1,093 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
}
] |
74492815226
|
"""Wrapper a los proceso de scrapping
"""
import tempfile
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# pylint: disable=unused-import
from scrapper.procesos.patentes_inpi_novedades import patentes_inpi_novedades
from scrapper.procesos.zonaprop import zonaprop
from scrapper.procesos.dummy import dummy_download_file
from scrapper.procesos.inpi_novedades import inpi_novedades
def get_chrome_driver(download_folder, show=False):
"""Configura y retorna el driver chrome
Args:
download_folder (str): Carpeta de descarga deseada
show (bool): Establece si se muestra la operación en el navegador
Returns:
driver: retorna el objeto driver
"""
chrome_options = Options()
if not show:
chrome_options.add_argument("--headless=new")
chrome_options.add_argument(f"--download-directory={download_folder}")
chrome_options.add_argument("--start-maximized")
chrome_options.add_experimental_option("prefs", {
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"download.default_directory": download_folder,
"safebrowsing.enabled": True,
'safebrowsing.disable_download_protection': True
})
chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
caps = DesiredCapabilities().CHROME
#caps["pageLoadStrategy"] = "normal" # complete
#caps["pageLoadStrategy"] = "eager" # interactive
caps["pageLoadStrategy"] = "none"
driver = webdriver.Chrome(options=chrome_options, desired_capabilities=caps)
return driver
def scrap(proceso,
config,
log,
inputparam=None,
inputfile=None,
outputpath = None,
show_browser=False):
"""Ejecución generica de un proceso modelo de scrapping
Args:
proceso (str): Nombre del proceps
config (Config): Objeto de Configuración
log (Log): Objeto de Logging
inputparam (str, optional): Cadena variable de parámetros dada por el usuario.
Defaults to None.
inputfile (str, optional): Archivo de entrada. Defaults to None.
outputpath (str, optional): Carpeta de salida de los resultados. Defaults to None.
show_browser (bool, optional): Se muestra muestra el navegador durante el proceso
de scrapping.
Defaults to False.
Returns:
List: Lista de valores capturados
"""
if outputpath is None:
workpath = tempfile.mkdtemp()
else:
workpath = outputpath
temp_download_folder = os.path.join(workpath, "tmp")
os.makedirs(temp_download_folder, exist_ok=True)
log.info(f"Carpeta de descarga: {temp_download_folder}")
driver = get_chrome_driver(download_folder=temp_download_folder, show=show_browser)
section = "proc:" + proceso
function_name = config[section]["function"]
datos = []
if function_name in globals():
function = globals()[function_name]
log.info(f"Invocando a: {function_name}")
try:
datos = function(driver=driver,
log=log,
parametros=config[section],
inputfile=inputfile,
tmpdir=workpath,
inputparam=inputparam,
outputpath=outputpath,
show_browser = show_browser)
# pylint: disable=broad-except
except Exception as err:
log.exception("General en la invocación a la función de scrapping")
log.exception(str(err))
else:
log.error(f"proceso {function_name} no implementado")
return datos
|
pmoracho/scrapper-2
|
scrapper/scrapping.py
|
scrapping.py
|
py
| 3,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 84,
"usage_type": "call"
}
] |
21669508962
|
import requests
from common.log import GetLogger
log = GetLogger().get_logger()
class BaseRequest():
def __init__(self):
pass
def get(self,url,params=None,**kwargs):
try:
response=requests.get(url,params=params,**kwargs)
log.info("==========接口API请求开始===========")
log.info("请求URL:{}/n请求参数:{}".format(url,params))
log.info("接口请求成功!返回参数:{}".format(response.text))
log.info("===========结束===========")
except Exception as e:
log.error("接口请求异常!{0}".format(e))
def post(self,url,data=None,json=None,headers=None,**kwargs):
try:
response = requests.post(url=url, data=data.encode("utf-8"), json=json, headers=headers, **kwargs)
log.info("=========== API request started ===========")
log.info("Request URL: {}".format(url))
log.info("Request data: {}, json: {}".format(data, json))
log.info("Response body: {}".format(response.text))
log.info("Response time: {}ms".format(response.elapsed.total_seconds()*1000))
log.info("=========== end ===========")
return response
except Exception as e:
log.error("接口请求异常!{0}".format(e))
# if __name__ == '__main__':
# Headers = {
# "content-type": "application/json"
# }
# a=BaseRequest()
# a.get("http://192.168.122.105:9900/basic-user/web/business/system/list")
# data={
# "businessSystemCode": "DAC",
# "businessSystemId": 0,
# "businessSystemKey": "DAC",
# "businessSystemName": "基础配置中心"
# }
# a.post("http://192.168.122.105:9900/basic-user/web/business/system/save",data=data,headers=Headers)
|
menghuai1995/PythonAutoCode
|
API_Autotest/common/baseRequest.py
|
baseRequest.py
|
py
| 1,787 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "common.log.GetLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 23,
"usage_type": "call"
}
] |
10775113939
|
import re
import os
from collections import Counter, defaultdict, namedtuple
from itertools import combinations, product
from pprint import pprint
from parse import parse, findall
from math import prod, sqrt
dirname = os.path.dirname(__file__)
data = open(f'{dirname}/21-input.txt').read().splitlines()
data = [parse('{} (contains {})', d).fixed for d in data]
data = [(set(i.split(' ')), a.split(', ')) for i, a in data]
set_of_all_ingredients = set.union(*[i for i, _ in data])
all_ingredients = sum((list(i) for i, _ in data), list())
result = dict()
for ingredients, allergens in data:
for allergen in allergens:
if allergen in result:
result[allergen] &= ingredients
else:
result[allergen] = ingredients.copy()
set_of_ingredients_with_no_allergens = set_of_all_ingredients.copy()
for allergen, ingredients in result.items():
set_of_ingredients_with_no_allergens -= ingredients
print(sum([all_ingredients.count(s) for s in set_of_ingredients_with_no_allergens]))
result2 = {}
allergens_left = list(result.keys())
ingredients_identified = set()
while allergens_left:
for allergen in allergens_left.copy():
possibles = result[allergen] - ingredients_identified
if len(possibles) == 1:
ingredient = possibles.pop()
result2[allergen] = ingredient
ingredients_identified.add(ingredient)
allergens_left.remove(allergen)
print(','.join(result2[allergen] for allergen in sorted(result2.keys())))
|
knjmooney/Advent-Of-Code
|
2020/21-allergens.py
|
21-allergens.py
|
py
| 1,513 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "parse.parse",
"line_number": 10,
"usage_type": "call"
}
] |
37111986575
|
#This has to keep running, so it cannot be run from a Jupyter notebook.
#Purpose: receive data from Dialogflow, process it here, and return it back to Dialogflow.
#To do that, the response has to be returned as JSON.
import requests
import urllib
import IPython.display as ipd
import json
from bs4 import BeautifulSoup
from flask import Flask, request, jsonify
def getWeather(city) :
url = "https://search.naver.com/search.naver?query="
url = url + urllib.parse.quote_plus(city + "날씨")
print(url)
bs = BeautifulSoup(urllib.request.urlopen(url).read(), "html.parser")
temp = bs.select('span.todaytemp')
desc = bs.select('p.cast_txt')
#A dictionary is a good way to return the result.
return {"temp":temp[0].text, "desc":desc[0].text} #temp is the temperature, desc is e.g. "4 degrees lower than yesterday".
#return {"temp":temp[4+7].text, "desc":desc[0].text} #with the dictionary style, changes like this are easy to make.
#return temp[0].text + "/" + desc[0].text #returns the value as a plain string.
#Flask 객체 생성
app = Flask(__name__)
@app.route('/') #this is called a "decorator": it wraps the function, using the route declared on the app object, and passes what the browser requests to home().
#Even without knowing the details, it makes building a web application easy.
def home():
name = request.args.get("name")
item = request.args.get("item")
return "hello"#호출할때, 반드시 name이라는 파라미터를 호출해야한다.
@app.route('/abc')#데코레이터'라고 한다. 특정 함수가 호출할때, 앞뒤로 감싸는것, 클래스에 선언된 route다. 브라우저에 입력한것을 home에 넣어준다.
#잘 몰라도, 웹어플리케이션을 쉽게 만들도록 해준다.
def abc():
return "test"
@app.route('/weather')#decorator: maps the /weather route to the function below.
#Even without knowing the details, it makes building a web application easy.
def weather():
city = request.args.get("city")
info = getWeather(city)
#return "<font color=red>" + info["temp"] + "도 " + info["desc"] + "</font>"
#return info #not a standard web response format, so this does not work.
#return json.dumps(info)
return jsonify(info)
#A server handler that returns only "Hello", no matter what request comes in.
#Made callable via both GET and POST; when actually deploying, drop the GET method.
#GET is used for debugging; since this is not a public server, Dialogflow cannot call our server.
@app.route('/dialogflow', methods=['GET', 'POST'])
def dialogflow():
req = request.get_json(force=True)
print(json.dumps(req, indent=4))
res = {'fulfillmentText':'Hello~~~'}
return jsonify(res)
#The return value must follow the contract defined by Dialogflow and must be JSON.
if __name__ == '__main__':
#host=0.0.0.0 can be replaced with the real IP; 0.0.0.0 accepts connections even if you do not know the IP. Normally you would use your own IP.
#In that case, the two usable options are 0.0.0.0 and 127.0.0.1.
app.run(host='0.0.0.0', port = 3000, debug=True)
|
ssh6189/2020.02.05
|
server.py
|
server.py
|
py
| 3,365 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "urllib.parse.quote_plus",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 62,
"usage_type": "call"
}
] |
41794735960
|
import datetime
import unittest
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, MapType, ArrayType
import json
import csv
from src.transformations import add_columns, running_total, group_sales_by_type
# TODO: Include testing output map and array data
class TestTransformations(unittest.TestCase):
def setUp(self) -> None:
print("Setting up Spark")
conf = SparkConf().set("spark.driver.memory", "8g")
self.spark = SparkSession \
.builder \
.master("local[4]") \
.config(conf=conf) \
.appName("test simple transformation") \
.getOrCreate()
def test_add_columns(self):
# Create test data with each row as tuple
test_data = [(1, 2), (3, 4), (5, 6)]
# Create test DataFrame from the test data, pass the column names as required
test_df = self.spark.createDataFrame(data=test_data, schema=["first", "second"])
# Show data-frame
test_df.show(truncate=False)
# Execute transformation on the test data-frame and show the results
result_df = test_df.transform(add_columns)
result_df.show(truncate=False)
# Validate column
result_columns = result_df.columns
self.assertIn("sum", result_columns)
# Get rest column out of the data frame as list
result_data = result_df.select("sum").collect()
result_data = [item["sum"] for item in result_data]
# Validate result column values
self.assertListEqual(result_data, [3, 7, 11])
def test_map_data(self):
test_data = [
(1, "product_1", "2022-11-01", {"store 1": 12, "store 2": 3, "online": 5}),
(2, "product_1", "2022-11-02", {"store 1": 5, "online": 2}),
(3, "product_1", "2022-11-04", {"store 1": 8, "store 2": 12, "online": 11}),
(4, "product_1", "2022-11-05", {"store 1": 3, "store 2": 3})
]
test_df = self.spark.createDataFrame(test_data, schema=["order_id", "product", "date", "sales"])
test_df.show(truncate=False)
test_df.printSchema()
test_df_schema = StructType([
StructField(name="order_id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="date", dataType=StringType(), nullable=False),
StructField(name="sales", dataType=MapType(StringType(), IntegerType(), valueContainsNull=False), nullable=False),
])
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema)
test_df.show(truncate=False)
test_df.printSchema()
def test_list_data(self):
test_data = [
(1, "product_1", "2022-11-01", "2022-11-05", [3, 4, 6, 7, 12]),
(2, "product_1", "2022-11-06", "2022-11-12", [8, 4, 3, 1, 16, 13, 25]),
(3, "product_1", "2022-11-13", "2022-11-15", [3, 3, 6]),
(4, "product_2", "2022-11-01", "2022-11-07", [1, 12, 6, 9, 12, 2, 2]),
]
test_df_schema = StructType([
StructField(name="order_id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="start_date", dataType=StringType(), nullable=False),
StructField(name="end_date", dataType=StringType(), nullable=False),
StructField(name="sales", dataType=ArrayType(IntegerType()), nullable=False),
])
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema)\
.withColumn("start_date", f.to_date("start_date"))\
.withColumn("end_date", f.to_date("end_date"))
test_df.show(truncate=False)
test_df.printSchema()
sales_data_raw = test_df.select("sales").collect()
print(sales_data_raw)
sales_data = [item["sales"] for item in sales_data_raw]
print(sales_data)
print(type(sales_data))
print([[type(item) for item in data] for data in sales_data])
self.assertListEqual(
sales_data,
[[3, 4, 6, 7, 12], [8, 4, 3, 1, 16, 13, 25], [3, 3, 6], [1, 12, 6, 9, 12, 2, 2]]
)
def test_group_sales_by_type(self):
# Create test data
test_data = [
(1, "product_1", "online", "2022-11-01", 8),
(2, "product_1", "online", "2022-11-02", 6),
(3, "product_1", "online", "2022-11-04", 12),
(4, "product_1", "retail", "2022-11-01", 11),
(5, "product_1", "retail", "2022-11-02", 15),
(6, "product_1", "retail", "2022-11-03", 22),
(7, "product_1", "retail", "2022-11-04", 21),
(8, "product_2", "online", "2022-11-02", 1),
(9, "product_2", "online", "2022-11-03", 3),
(10, "product_2", "retail", "2022-11-01", 1),
(11, "product_2", "retail", "2022-11-02", 5),
(12, "product_2", "retail", "2022-11-04", 2)
]
# Define test data schema
test_df_schema = StructType([
StructField(name="id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="sale_type", dataType=StringType(), nullable=False),
StructField(name="sale_date", dataType=StringType(), nullable=False),
StructField(name="num_sales", dataType=IntegerType(), nullable=False),
])
# Create test DataFrame
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema)\
.withColumn("sale_date", f.to_date("sale_date"))
# Print the data frame and its schema
test_df.show(truncate=False)
test_df.printSchema()
# Run the transformation on test data
grouped_data = test_df.transform(group_sales_by_type)
grouped_data.show(truncate=False)
grouped_data.printSchema()
# Collect results to validate
validation_cols = grouped_data.select("sale_dates", "num_sales").collect()
sale_dates = [item['sale_dates'] for item in validation_cols]
num_sales = [item['num_sales'] for item in validation_cols]
# Print sale_dates column result
print(sale_dates)
# Create and validate expected `sale_dates` result
expected_sale_dates = [
[
datetime.datetime.strptime("2022-11-01", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-04", "%Y-%m-%d").date()
], [
datetime.datetime.strptime("2022-11-01", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-03", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-04", "%Y-%m-%d").date()
],
[
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-03", "%Y-%m-%d").date()
],
[
datetime.datetime.strptime("2022-11-01", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-02", "%Y-%m-%d").date(),
datetime.datetime.strptime("2022-11-04", "%Y-%m-%d").date(),
]
]
self.assertListEqual(sale_dates, expected_sale_dates)
# Validate number of sales result
self.assertListEqual(num_sales, [[8, 6, 12], [11, 15, 22, 21], [1, 3], [1, 5, 2]])
def test_create_struct_data(self):
# Create test data
test_data = [
(1, "product_1", "2022-11-01", {"retail": 8, "online": 12}),
(2, "product_1", "2022-11-02", {"retail": 3}),
(3, "product_1", "2022-11-03", {"retail": 5, "online": 2}),
(4, "product_1", "2022-11-04", {"online": 8}),
(5, "product_2", "2022-11-02", {"retail": 2, "online": 1}),
(6, "product_2", "2022-11-03", {"retail": 3, "online": 2}),
]
# Define test data schema
test_df_schema = StructType([
StructField(name="id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="sale_date", dataType=StringType(), nullable=False),
StructField(name="num_sales", dataType=StructType([
StructField("retail", IntegerType(), nullable=True),
StructField("online", IntegerType(), nullable=True),
]))
])
# Create test DataFrame
test_df = self.spark.createDataFrame(test_data, schema=test_df_schema) \
.withColumn("sale_date", f.to_date("sale_date"))
# Print the data frame and its schema
test_df.show(truncate=False)
test_df.printSchema()
# method 1 - process the nested Row instances:
num_sales = test_df.select("num_sales").collect()
print(num_sales)
online_sales = [item['num_sales']['online'] for item in num_sales]
retail_sales = [item['num_sales']['retail'] for item in num_sales]
self.assertListEqual(online_sales, [12, None, 2, 8, 1, 2])
self.assertListEqual(retail_sales, [8, 3, 5, None, 2, 3])
# method 2 - select to separate columns
num_sales_method_2 = test_df.select("num_sales").select("num_sales.*").collect()
print(num_sales_method_2)
online_sales_method_2 = [item['online'] for item in num_sales_method_2]
retail_sales_method_2 = [item['retail'] for item in num_sales_method_2]
self.assertListEqual(online_sales_method_2, [12, None, 2, 8, 1, 2])
self.assertListEqual(retail_sales_method_2, [8, 3, 5, None, 2, 3])
# method 3 - convert the struct column to json
num_sales_method_3 = test_df.withColumn("num_sales", f.to_json(f.col("num_sales"))).select("num_sales").collect()
print(num_sales_method_3)
online_sales_method_3 = [
json.loads(item['num_sales'])['online'] if 'online' in json.loads(item['num_sales']) else None
for item in num_sales_method_3
]
retail_sales_method_3 = [
json.loads(item['num_sales'])['retail'] if 'retail' in json.loads(item['num_sales']) else None
for item in num_sales_method_3
]
self.assertListEqual(online_sales_method_3, [12, None, 2, 8, 1, 2])
self.assertListEqual(retail_sales_method_3, [8, 3, 5, None, 2, 3])
def test_running_total(self):
# # Option 1 - provide a date column
# test_data = [
# (1, "product_1", datetime.strptime("2022-11-01", "%Y-%m-%d").date(), 1),
# (2, "product_1", datetime.strptime("2022-11-03", "%Y-%m-%d").date(), 1),
# (3, "product_1", datetime.strptime("2022-11-04", "%Y-%m-%d").date(), 3),
# (4, "product_1", datetime.strptime("2022-11-05", "%Y-%m-%d").date(), 2),
# (5, "product_2", datetime.strptime("2022-11-02", "%Y-%m-%d").date(), 4),
# (6, "product_2", datetime.strptime("2022-11-04", "%Y-%m-%d").date(), 3),
# ]
# Option 2 - input date as string and cast in Spark
test_data = [
(1, "product_1", "2022-11-01", 1),
(2, "product_1", "2022-11-03", 1),
(3, "product_1", "2022-11-04", 3),
(4, "product_1", "2022-11-05", 2),
(5, "product_2", "2022-11-02", 4),
(6, "product_2", "2022-11-04", 3),
]
test_df_columns = ["order_id", "product", "order_date", "qty"]
test_df = self.spark.createDataFrame(test_data, test_df_columns)\
.withColumn("order_date", f.to_date("order_date"))
test_df.show(truncate=False)
test_df.printSchema()
result_df = test_df.transform(running_total)
result_df.show(truncate=False)
result_data = result_df.select("running_sum_qty").collect()
result_data = [item['running_sum_qty'] for item in result_data]
self.assertListEqual(result_data, [1, 2, 5, 7, 4, 7])
def test_group_sales_by_type_from_file(self):
# Define test data schema
test_df_schema = StructType([
StructField(name="id", dataType=IntegerType(), nullable=False),
StructField(name="product", dataType=StringType(), nullable=False),
StructField(name="sale_type", dataType=StringType(), nullable=False),
StructField(name="sale_date", dataType=StringType(), nullable=False),
StructField(name="num_sales", dataType=IntegerType(), nullable=False),
])
# Read test data from .csv file
test_df = self.spark.read.option("header", True).schema(test_df_schema).csv("test_data/test_data.csv")
test_df.show(truncate=False)
test_df.printSchema()
# Perform the transformation
result_df = test_df.transform(group_sales_by_type)
result_df.show(truncate=False)
result_df.printSchema()
# Extract result data frame to list
result_data_raw = result_df.select("num_sales").collect()
result_data = [item["num_sales"] for item in result_data_raw]
# Load expected data
with open("test_data/test_result.csv", mode='r') as file_handle:
expected_data = [json.loads(line[0]) for line in csv.reader(file_handle)]
print(f"Expected data: {expected_data}")
self.assertListEqual(result_data, expected_data)
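# --- Hedged sketch: the src.transformations module under test is not shown here ---
# One possible running_total implementation consistent with test_running_total's
# expected output [1, 2, 5, 7, 4, 7]: a cumulative sum of qty per product, ordered by
# order_date. The function body below is an assumption for illustration, not the
# repository's actual code; only the column names come from the tests above.
from pyspark.sql import DataFrame, Window
import pyspark.sql.functions as f

def running_total_sketch(df: DataFrame) -> DataFrame:
    window = (Window.partitionBy("product")
              .orderBy("order_date")
              .rowsBetween(Window.unboundedPreceding, Window.currentRow))
    return df.withColumn("running_sum_qty", f.sum("qty").over(window))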
|
SA01/spark-unittest-tutorial
|
tests/test_transformations.py
|
test_transformations.py
|
py
| 13,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.master",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "src.transformations.add_columns",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.MapType",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.ArrayType",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.to_date",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.to_date",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.to_date",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "src.transformations.group_sales_by_type",
"line_number": 145,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.to_date",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.to_json",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "pyspark.sql.functions.col",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions.to_date",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.functions",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "src.transformations.running_total",
"line_number": 277,
"usage_type": "argument"
},
{
"api_name": "pyspark.sql.types.StructType",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StringType",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.StructField",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.types.IntegerType",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "src.transformations.group_sales_by_type",
"line_number": 302,
"usage_type": "argument"
},
{
"api_name": "json.loads",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 312,
"usage_type": "call"
}
] |
71176383548
|
import boto3, datetime, time, os
region = 'us-west-2'
accesskey = os.environ['ACCESSKEY']
secretkey = os.environ['SECRET_ACCESSKEY']
def send_firehose(message: str):
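    # Create a Firehose client with credentials read from the environment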
client = boto3.client('firehose', aws_access_key_id=accesskey, aws_secret_access_key=secretkey, region_name=region)
# Send message to firehose
response = client.put_record(
DeliveryStreamName=os.environ['FIREHOSE_NAME'],
Record={
'Data': message
}
)
return response
|
kfunamizu/python_commonlib
|
firehose/src/firehose.py
|
firehose.py
|
py
| 479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 9,
"usage_type": "call"
}
] |
12064640805
|
"""
A Module for Encrypt and Decrypt message
"""
import ReadWriteFileManagement
# Import the Fernet module
from cryptography.fernet import Fernet
DATABASE_DIR_PATH = "../../databases/chat_db/"
FILE_NAME = "encryptKey.key"
"""
A function for generating an encryption key.
the function generate the using Fernet class,
and saves the key into file in a specific path
"""
def generate_encryption_key():
# generate a random key for encryption
encrypt_key = Fernet.generate_key()
# Writing the key into a file in order to decode the messages when needed
    ReadWriteFileManagement.create_chat_file(FILE_NAME)
# Stores the encryption key into the encryption file
with open(DATABASE_DIR_PATH + FILE_NAME, 'wb') as encryptKey:
encryptKey.write(encrypt_key)
"""
A function for Encrypt a text message
"""
def message_encrypt(message_to_encrypt):
# Opening the file that stores the encrypt key
with open(DATABASE_DIR_PATH + FILE_NAME, 'rb') as en_Key:
key = en_Key.read()
    # Creating an instance of the Fernet class with encrypt_key,
    # so we can encrypt each message using Fernet methods
fernet = Fernet(key)
    # Encrypt the message
enc_message = fernet.encrypt(message_to_encrypt.encode())
return enc_message
"""
A function for Decrypt a text message
"""
def message_decrypt(message_to_decrypt):
# Opening the file that stores the encrypt key
with open(DATABASE_DIR_PATH + FILE_NAME, 'rb') as encryptKey:
key = encryptKey.read()
    # Creating an instance of the Fernet class with encrypt_key,
    # so we can decrypt each message using Fernet methods
fernet = Fernet(key)
decode_message = fernet.decrypt(message_to_decrypt.decode())
return decode_message
if __name__ == "__main__":
generate_encryption_key()
message = "Hello"
enc_m = message_encrypt(message)
print(enc_m.decode())
dec_m = message_decrypt(enc_m)
print(dec_m.decode())
|
RoyYotam/My-Chat
|
src/help classes/EncryptMessage.py
|
EncryptMessage.py
|
py
| 2,030 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cryptography.fernet.Fernet.generate_key",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cryptography.fernet.Fernet",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "ReadWriteFileManagement.create_chat_file",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "ReadWriteFileManagement.create_chat_file",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cryptography.fernet.Fernet",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cryptography.fernet.Fernet",
"line_number": 59,
"usage_type": "call"
}
] |
31973217705
|
"""filename and file size in file model
Revision ID: 6d23296b922b
Revises: 6ec29c8de008
Create Date: 2023-03-02 17:47:25.025321
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6d23296b922b'
down_revision = '6ec29c8de008'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('files', schema=None) as batch_op:
batch_op.add_column(sa.Column('filename', sa.String(length=255), nullable=True))
batch_op.add_column(sa.Column('file_size', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('files', schema=None) as batch_op:
batch_op.drop_column('file_size')
batch_op.drop_column('filename')
# ### end Alembic commands ###
|
synzr/file-transfer-service
|
migrations/versions/6d23296b922b_filename_and_file_size_in_file_model.py
|
6d23296b922b_filename_and_file_size_in_file_model.py
|
py
| 952 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 30,
"usage_type": "name"
}
] |
2931948188
|
from typing import Dict
from network.api.base import BaseRepository
import re
from http.cookies import SimpleCookie
import dukpy
import requests
import http.client
from network.api.base_mainpage_cookies_loader import BaseMainPageCookiesLoader
IPP_JS_PATH = 'resources/ipp.js'
class PagesRepository(BaseRepository, BaseMainPageCookiesLoader):
def __init__(self, user_agent: str, cookies: Dict[str, str] = None):
super().__init__(user_agent=user_agent, cookies=cookies)
def get_main_page_cookies(self):
# first we get ipp cookies
self.get_ipp_cookies()
# then we load main page with ipp cookies
# and get more cookies
self.load_main_page()
def get_cookies(self): # override BaseMainPageCookiesLoader's method
return self.get_main_page_cookies()
def get_ipp_cookies(self):
# send request to main page
response = self.load_main_page(False)
# get js for tokens generation
js_code_regex_result = \
re.findall(r'<script type=\"text/javascript\">(.*)</script>', response.text, flags=re.S | re.M)
assert len(js_code_regex_result) > 0
js_code = js_code_regex_result[0]
# edit js to extract cookies from script's result
with open(IPP_JS_PATH, encoding='utf8') as f:
part_of_code = f.read()
js_code = part_of_code + '\r\n' + js_code + '\r\n' + 'document.cookie'
# evaluate js
ipp_cookies_string = dukpy.evaljs(js_code, user_agent=self.user_agent)
# update cookies
ipp_cookies = SimpleCookie()
ipp_cookies.load(ipp_cookies_string)
for k, v in ipp_cookies.items():
self.cookies[k] = v.value
def load_main_page(self, update_cookies=True) -> requests.Response:
http.client._MAXHEADERS = 1000
# send request to main page
response = requests.get(
'https://dns-shop.ru/',
headers={
'accept': '*/*',
'user-agent': self.user_agent,
'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',
},
cookies=self.cookies,
)
if update_cookies:
self.update_cookies(response.cookies)
return response
|
Emperator122/dns-shop_price_comparator
|
network/api/pages.py
|
pages.py
|
py
| 2,270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "network.api.base.BaseRepository",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "network.api.base_mainpage_cookies_loader.BaseMainPageCookiesLoader",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "re.M",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "dukpy.evaljs",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "http.cookies.SimpleCookie",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "http.cookies.client",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "http.cookies",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "requests.Response",
"line_number": 50,
"usage_type": "attribute"
}
] |
20794460102
|
import numpy as np
import matplotlib.pyplot as plt
x = []; y = []
for i in range(100):
x.append(np.sin(np.pi/48*i))
y.append(2-2*np.cos(np.pi/48*i))
plt.plot(x, y)
plt.show()
|
duynamrcv/mpc_ros
|
test.py
|
test.py
|
py
| 185 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.sin",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
}
] |
12701288482
|
from collections import deque
import sys
d = deque()
n = int(sys.stdin.readline().rstrip())
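# Each of the n lines is one queue command: push X, pop, size, empty, front, or back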
for i in range(n):
order = sys.stdin.readline().rstrip().split()
a = order[0]
if a == "push":
d.append(order[1])
elif a == "pop":
if d:
print(d.popleft())
else:
print(-1)
elif a == "size":
print(len(d))
elif a == "empty":
if d:
print(0)
else:
print(1)
elif a == "front":
if d:
print(d[0])
else:
print(-1)
else:
if d:
print(d[-1])
else:
print(-1)
|
MinChoi0129/Algorithm_Problems
|
BOJ_Problems/18258.py
|
18258.py
|
py
| 647 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 8,
"usage_type": "attribute"
}
] |
73886255546
|
import psycopg2
import os
import http.server
import socketserver
import logging
import sys
class IndexHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
self.path = 'index.html'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
print("start app")
app_port = os.environ.get("APP_PORT")
connection = psycopg2.connect(
user=os.environ.get("DB_USER"),
password=os.environ.get("DB_PASSWORD"),
host=os.environ.get("DB_HOST"),
port=os.environ.get("DB_PORT"),
database=os.environ.get("DB_NAME")
)
cursor = connection.cursor()
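# Dump every row of the "test" table into index.html, which IndexHandler serves on GET requests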
with open("index.html", "w") as f:
try:
cursor.execute("SELECT * from test;")
for i in cursor:
f.write(f"{i}\n")
    except (Exception, psycopg2.Error) as error:
print(error)
Handler = IndexHandler
with socketserver.TCPServer(("", int(app_port)), Handler) as httpd:
print("Serving at port: ", int(app_port))
httpd.serve_forever()
|
hed854/tp3-kubernetes
|
frontend/app.py
|
app.py
|
py
| 978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "http.server.server",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "http.server.server.SimpleHTTPRequestHandler.do_GET",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "http.server.server",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "http.server",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "socketserver.TCPServer",
"line_number": 38,
"usage_type": "call"
}
] |
73510645947
|
from collections import defaultdict
N = int(input())
for i in range(N):
forwared = defaultdict(int)
reverse = defaultdict(int)
m,n= map(int,input().split())
nums = []
ans = 0
for i in range(m):
temp = list(map(int,input().split()))
nums.append(temp)
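    # Sum the grid values along each main diagonal (constant i-j) and anti-diagonal (constant i+j)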
for i in range(m):
for j in range(n):
temp = i-j
temp2 = i+j
forwared[temp]+=nums[i][j]
reverse[temp2]+=nums[i][j]
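    # The best X-shaped sum through (i, j) adds both diagonal totals and subtracts nums[i][j], which was counted twice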
for i in range(m):
for j in range(n):
sum = forwared[i-j]+reverse[i+j]-nums[i][j]
ans = max(ans,sum)
print(ans)
|
yonaSisay/a2sv-competitive-programming
|
xsum.py
|
xsum.py
|
py
| 627 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
}
] |
2046128502
|
import os
import sys
RASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(RASE_DIR)
import time
import threading
from verify import Verify
from scrapy import cmdline
from multiprocessing import Process
from Bearcat_ProxyPool.settings import SPIDER_TIME
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
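# Run the named spider in its own process, then sleep for the given frequency before launching it again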
def start_blspider(spider_name, frequency):
args = ['scrapy', 'crawl', spider_name]
while True:
p = Process(target=cmdline.execute, args=(args,))
p.start()
p.join()
time.sleep(frequency)
if __name__ == '__main__':
confs = [{"spider_name": "xici", "frequency": SPIDER_TIME},
{"spider_name": "xila", "frequency": SPIDER_TIME},
{"spider_name": "ihuan", "frequency": SPIDER_TIME},
{"spider_name": "ip3366", "frequency": SPIDER_TIME},
{"spider_name": "nima", "frequency": SPIDER_TIME},
{"spider_name": "kuai", "frequency": SPIDER_TIME}]
for conf in confs:
process = Process(target=start_blspider, args=(conf.get("spider_name"), conf.get("frequency", 0)))
process.start()
    proces = threading.Thread(target=Verify().main)
proces.start()
|
yuzhiyizhan/Bearcat_ProxyPool
|
main.py
|
main.py
|
py
| 1,261 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Process",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "scrapy.cmdline.execute",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "scrapy.cmdline",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Bearcat_ProxyPool.settings.SPIDER_TIME",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "Bearcat_ProxyPool.settings.SPIDER_TIME",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "Bearcat_ProxyPool.settings.SPIDER_TIME",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "Bearcat_ProxyPool.settings.SPIDER_TIME",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "Bearcat_ProxyPool.settings.SPIDER_TIME",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "Bearcat_ProxyPool.settings.SPIDER_TIME",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "multiprocessing.Process",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "verify.Verify",
"line_number": 38,
"usage_type": "call"
}
] |
8454082456
|
from uuid import uuid4
from sqlalchemy import text
from critique_wheel.adapters.sqlalchemy import iam_repository
from critique_wheel.critiques.models.critique import Critique
from critique_wheel.members.models.IAM import MemberStatus
from critique_wheel.members.value_objects import MemberId
from critique_wheel.works.models.work import Work
from critique_wheel.works.value_objects import WorkId
def test_repository_can_save_a_basic_member(
session, active_valid_member, valid_work, valid_critique
):
member = active_valid_member
member.id = MemberId()
repo = iam_repository.SqlAlchemyMemberRepository(session)
assert member.works == []
assert member.critiques == []
repo.add(member)
valid_work.id = WorkId()
valid_work.member_id = member.id
member.add_work(valid_work)
member.add_critique(valid_critique)
session.commit()
rows = list(
session.execute(
text(
"SELECT id, username, email, password, member_type, status FROM members"
)
)
)
assert rows == [
(
member.id.get_uuid(),
member.username,
member.email,
member.password,
member.member_type.value,
member.status.value,
)
]
def test_repository_can_get_a_member_by_id(
session, valid_member, valid_work, valid_critique
):
valid_member.id = MemberId()
valid_member.status = MemberStatus.ACTIVE
member = valid_member
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
valid_work.member_id = member.id
valid_critique.member_id = member.id
member.add_work(valid_work)
member.add_critique(valid_critique)
assert len(member.works) == 1
assert len(member.critiques) == 1
session.commit()
stmt = text('SELECT * FROM "members" WHERE id=:id').bindparams(
id=valid_member.id.get_uuid(),
)
rows = session.execute(stmt).fetchall()
assert len(rows) == 1
retrieved_works = session.query(Work).filter_by(member_id=valid_member.id).all()
assert len(retrieved_works) == 1
assert retrieved_works[0].title == valid_work.title
retrieved_critiques = session.query(Critique).filter_by(member_id=member.id).all()
assert len(retrieved_critiques) == 1
assert retrieved_critiques[0].critique_about == valid_critique.critique_about
assert retrieved_works[0].member_id == member.id
assert retrieved_critiques[0].member_id == member.id
assert repo.get_member_by_id(member.id) == member
assert member in repo.list()
def test_repository_can_get_a_member_by_email(session, valid_member):
valid_member.id = MemberId()
valid_member.status = MemberStatus.ACTIVE
valid_member.email = "[email protected]"
member = valid_member
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
session.commit()
assert repo.get_member_by_email(member.email) == member
def test_resository_can_get_a_member_by_username(session, valid_member):
valid_member.id = MemberId()
valid_member.status = MemberStatus.ACTIVE
valid_member.username = "yet_another_username"
member = valid_member
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
session.commit()
assert repo.get_member_by_username(member.username) == member
def test_resository_can_get_a_list_of_members(
session, valid_member, active_valid_member
):
member = valid_member
member_2 = active_valid_member
valid_member.status = MemberStatus.ACTIVE
repo = iam_repository.SqlAlchemyMemberRepository(session)
repo.add(member)
repo.add(member_2)
session.commit()
    assert member in repo.list() and member_2 in repo.list()
def test_repository_returns_None_for_no_member_found(session):
repo = iam_repository.SqlAlchemyMemberRepository(session)
username, email, id = "not_in_db", "[email protected]", uuid4()
assert repo.get_member_by_username(username) is None
assert repo.get_member_by_email(email) is None
assert repo.get_member_by_id(id) is None
|
davidjnevin/ddd_critiquewheel
|
critique_wheel/tests/integration/test_IAM_repository.py
|
test_IAM_repository.py
|
py
| 4,132 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "critique_wheel.members.value_objects.MemberId",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository.SqlAlchemyMemberRepository",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "critique_wheel.works.value_objects.WorkId",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.text",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "critique_wheel.members.value_objects.MemberId",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus.ACTIVE",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository.SqlAlchemyMemberRepository",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.text",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "critique_wheel.works.models.work.Work",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "critique_wheel.critiques.models.critique.Critique",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "critique_wheel.members.value_objects.MemberId",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus.ACTIVE",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository.SqlAlchemyMemberRepository",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "critique_wheel.members.value_objects.MemberId",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus.ACTIVE",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository.SqlAlchemyMemberRepository",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus.ACTIVE",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "critique_wheel.members.models.IAM.MemberStatus",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository.SqlAlchemyMemberRepository",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository.SqlAlchemyMemberRepository",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "critique_wheel.adapters.sqlalchemy.iam_repository",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 125,
"usage_type": "call"
}
] |
18525593045
|
import argparse
import os
import mmcv
import torch
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from drp.apis import set_random_seed, single_gpu_test
from drp.datasets import build_dataloader, build_dataset
from drp.models import build_model
from drp.datasets.pipelines.utils import get_weight
def parse_args():
parser = argparse.ArgumentParser(description='drp tester')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--out', help='output result pickle file')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results')
parser.add_argument(
'--save-path',
default=None,
type=str,
help='path to store images and if not given, will not save image')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
# set random seeds
if args.seed is not None:
if rank == 0:
print('set random seed to', args.seed)
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
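    # Build the dataloader config: workers_per_gpu from the test config, single-sample
    # non-shuffled batches, plus any overrides given under cfg.data.test_dataloader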
loader_cfg = {
**dict((k, cfg.data[k]) for k in ['workers_per_gpu'] if k in cfg.data),
**dict(
samples_per_gpu=1,
drop_last=False,
shuffle=False,
dist=distributed),
**cfg.data.get('test_dataloader', {})
}
data_loader = build_dataloader(dataset, **loader_cfg)
model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
for k,v in model.state_dict().items():
print(f'{k} : {v.shape}')
model.load_state_dict(torch.load(args.checkpoint))
if cfg.edges is not None:
gsea_path, ppi_path, pearson_path = cfg.edges[0], cfg.edges[1], cfg.edges[2]
cell_edges_index, cell_edges_attr = get_weight(gsea_path, ppi_path, pearson_path)
model.update_encoder_buffer(cfg.test_batch_size, cell_edges_attr, cell_edges_index, 18498)
for k,v in model.state_dict().items():
print(f'{k} : {v.shape}')
outputs = single_gpu_test(
model.cuda(),
data_loader)
if rank == 0:
print('')
# print metrics
stats = dataset.evaluate(outputs)
for stat in stats:
print('Eval-{}: {}'.format(stat, stats[stat]))
# save result pickle
if args.out:
print('writing results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
if __name__ == '__main__':
main()
|
yivan-WYYGDSG/AGMI
|
tools/test.py
|
test.py
|
py
| 3,775 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "mmcv.Config.fromfile",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "mmcv.Config",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "mmcv.runner.init_dist",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "mmcv.runner.get_dist_info",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "drp.apis.set_random_seed",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "drp.datasets.build_dataset",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "drp.datasets.build_dataloader",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "drp.models.build_model",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "drp.datasets.pipelines.utils.get_weight",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "drp.apis.single_gpu_test",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "mmcv.dump",
"line_number": 114,
"usage_type": "call"
}
] |
26986918036
|
# -*- coding: utf-8 -*-
import base64
import pytest
from nameko_grpc.headers import (
HeaderManager,
check_decoded,
check_encoded,
comma_join,
decode_header,
encode_header,
filter_headers_for_application,
sort_headers_for_wire,
)
class TestEncodeHeader:
def test_binary(self):
assert encode_header(("foo-bin", b"123")) == (
b"foo-bin",
base64.b64encode(b"123"),
)
def test_string_value(self):
assert encode_header(("foo", "123")) == (b"foo", b"123")
class TestDecodeHeader:
def test_binary(self):
assert decode_header((b"foo-bin", base64.b64encode(b"123"))) == (
"foo-bin",
b"123",
)
def test_binary_with_truncated_padding(self):
padded_value = base64.b64encode(b"1234")
assert padded_value.endswith(b"=")
trimmed_value = padded_value[:-2]
assert decode_header((b"foo-bin", trimmed_value)) == (
"foo-bin",
b"1234",
)
def test_string_value(self):
assert decode_header((b"foo", b"123")) == ("foo", "123")
class TestFilterHeadersForApplication:
def test_no_application(self):
headers = [(":status", "1"), ("content-type", "2"), ("grpc-foo", "3")]
assert filter_headers_for_application(headers) == []
def test_all_application(self):
headers = [("foo", "1"), ("bar", "2"), ("baz", "3")]
assert filter_headers_for_application(headers) == headers
def test_filter(self):
headers = [
("foo", "1"),
(":status", "1"),
("bar", "2"),
("content-type", "2"),
("baz", "3"),
]
assert filter_headers_for_application(headers) == [
("foo", "1"),
("bar", "2"),
("baz", "3"),
]
class TestSortHeadersForWire:
def test_empty(self):
unsorted = []
for_wire = []
assert sort_headers_for_wire(unsorted) == for_wire
def test_already_sorted(self):
unsorted = [
(":status", "1"),
("content-type", "2"),
("grpc-foo", "3"),
("other", "4"),
]
for_wire = [
(":status", "1"),
("content-type", "2"),
("grpc-foo", "3"),
("other", "4"),
]
assert sort_headers_for_wire(unsorted) == for_wire
def test_sort(self):
unsorted = [
("content-type", "2"),
(":status", "1"),
("other", "4"),
("grpc-foo", "3"),
]
for_wire = [
(":status", "1"),
("content-type", "2"),
("grpc-foo", "3"),
("other", "4"),
]
assert sort_headers_for_wire(unsorted) == for_wire
def test_multi_sort(self):
unsorted = [
("content-type", "1"),
("te", "2"),
(":status", "3"),
(":authority", "4"),
("other", "5"),
("grpc-foo", "6"),
("grpc-bar", "7"),
("more", "8"),
(":method", "9"),
]
for_wire = [
(":status", "3"),
(":authority", "4"),
(":method", "9"),
("content-type", "1"),
("te", "2"),
("grpc-foo", "6"),
("grpc-bar", "7"),
("other", "5"),
("more", "8"),
]
assert sort_headers_for_wire(unsorted) == for_wire
class TestCheckEncoded:
def test_empty(self):
assert check_encoded([]) is None
def test_good(self):
assert check_encoded([(b"foo", b"bar")]) is None
def test_bad_name(self):
with pytest.raises(AssertionError):
check_encoded([("foo", b"bar")])
def test_bad_value(self):
with pytest.raises(AssertionError):
check_encoded([(b"foo", "bar")])
class TestCheckDecoded:
def test_empty(self):
assert check_decoded([]) is None
def test_good(self):
assert check_decoded([("foo", "bar")]) is None
def test_bad_name(self):
with pytest.raises(AssertionError):
check_decoded([(b"foo", "bar")])
def test_bad_value(self):
with pytest.raises(AssertionError):
check_decoded([("foo", b"bar")])
def test_good_binary(self):
assert check_decoded([("foo-bin", b"bar")]) is None
def test_bad_binary(self):
with pytest.raises(AssertionError):
check_decoded([("foo-bin", "bar")])
class TestCommaJoin:
def test_string(self):
assert comma_join(["foo", "bar"]) == "foo,bar"
def test_bytes(self):
assert comma_join([b"foo", b"bar"]) == b"foo,bar"
def test_mixed(self):
with pytest.raises(TypeError):
comma_join([b"foo", "bar"])
class TestHeaderManager:
def test_get(self):
manager = HeaderManager()
manager.set(("foo", "bar"))
assert manager.get("foo") == "bar"
def test_get_with_default(self):
manager = HeaderManager()
assert manager.get("foo", "baz") == "baz"
def test_get_multi(self):
manager = HeaderManager()
manager.set(("foo", "bar"), ("foo", "baz"))
assert manager.get("foo") == "bar,baz"
def test_set(self):
manager = HeaderManager()
manager.set(("x", "y"), ("foo", "bar"))
assert manager.get("x") == "y"
assert manager.get("foo") == "bar"
manager.set(("foo", "baz")) # clears existing foo, only
assert manager.get("foo") == "baz"
assert manager.get("x") == "y"
def test_set_from_wire(self):
manager = HeaderManager()
manager.set((b"foo", b"bar"), from_wire=True)
assert manager.get("foo") == "bar"
def test_append(self):
manager = HeaderManager()
manager.set(("x", "y"), ("foo", "bar"))
assert manager.get("x") == "y"
assert manager.get("foo") == "bar"
manager.append(("foo", "baz")) # appends to foo
assert manager.get("foo") == "bar,baz"
assert manager.get("x") == "y"
def test_append_from_wire(self):
manager = HeaderManager()
manager.set(("foo", "bar"))
manager.append((b"foo", b"baz"), from_wire=True)
assert manager.get("foo") == "bar,baz"
def test_for_wire(self):
manager = HeaderManager()
manager.set(("x", "y"), ("foo", "bar"))
assert manager.for_wire == [(b"x", b"y"), (b"foo", b"bar")]
|
nameko/nameko-grpc
|
test/test_headers.py
|
test_headers.py
|
py
| 6,536 |
python
|
en
|
code
| 57 |
github-code
|
6
|
[
{
"api_name": "nameko_grpc.headers.encode_header",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.encode_header",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.decode_header",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.decode_header",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.decode_header",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.filter_headers_for_application",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.filter_headers_for_application",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.filter_headers_for_application",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.sort_headers_for_wire",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.sort_headers_for_wire",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.sort_headers_for_wire",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.sort_headers_for_wire",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_encoded",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_encoded",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_encoded",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_encoded",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_decoded",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_decoded",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_decoded",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_decoded",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_decoded",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.check_decoded",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.comma_join",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.comma_join",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.comma_join",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "nameko_grpc.headers.HeaderManager",
"line_number": 239,
"usage_type": "call"
}
] |
5200005552
|
"""
The production code for predicting smell events and sending push notifications
(sending push notifications requires the rake script in the smell-pittsburgh-rails repository)
"""
import sys
from util import log, generateLogger, computeMetric, isFileHere
import pandas as pd
from getData import getData
from preprocessData import preprocessData
from computeFeatures import computeFeatures
#from selectFeatures import selectFeatures
from trainModel import trainModel
import joblib
from datetime import timedelta, datetime
import os
import subprocess
# The flag to determine the server type
SERVER = "staging"
#SERVER = "production"
# The flag for enabling the rake call to send push notifications
#ENABLE_RAKE_CALL = False
ENABLE_RAKE_CALL = True
# The path for storing push notification data
DATA_PATH = "data_production/"
def main(argv):
mode = None
if len(argv) >= 2:
mode = argv[1]
if mode == "train":
train()
elif mode == "predict":
predict()
else:
print("Use 'python main.py [mode]'; mode can be 'train' or 'predict'")
def train(f_hr=8, b_hr=3, thr=40, method="HCR"):
"""
Train the machine learning model for predicting smell events
Input:
f_hr: the number of hours to look further and compute responses (Y),
...which is the sum of smell ratings (that are larger than 3) over the future f_hr hours
b_hr: the number of hours to look back and compute features (X),
...which are the sensor readings (on ESDR) over the past b_hr hours
thr: the threshold for binning the smell value into two classes (for classification)
method: the method used in the "trainModel.py" file
"""
p = DATA_PATH
# Set logger
logger = generateLogger(p+"log.log")
log("--------------------------------------------------------------------------", logger)
log("--------------------------------- Train --------------------------------", logger)
# Get and preprocess data
end_dt = datetime.now() - timedelta(hours=24)
start_dt = end_dt - timedelta(hours=8000)
log("Get data from " + str(start_dt) + " to " + str(end_dt), logger)
df_esdr_array_raw, df_smell_raw = getData(start_dt=start_dt, end_dt=end_dt, logger=logger)
df_esdr, df_smell = preprocessData(df_esdr_array_raw=df_esdr_array_raw, df_smell_raw=df_smell_raw, logger=logger)
# Compute features
df_X, df_Y, df_C = computeFeatures(df_esdr=df_esdr, df_smell=df_smell, f_hr=f_hr, b_hr=b_hr, thr=thr, is_regr=False,
add_inter=False, add_roll=False, add_diff=False, logger=logger, out_p_mean=p+"mean.csv", out_p_std=p+"std.csv")
# Select features
# NOTE: currently, the best model uses all the features
#df_X, df_Y = selectFeatures(df_X, df_Y, logger=logger, out_p=p+"feat_selected.csv")
# Train, save, and evaluate model
# NOTE: to know more about the model, see the "HybridCrowdClassifier.py" file
model = trainModel({"X": df_X, "Y": df_Y, "C": df_C}, method=method, out_p=p+"model.pkl", logger=logger)
metric = computeMetric(df_Y, model.predict(df_X, df_C), False)
for m in metric:
log(metric[m], logger)
def predict(f_hr=8, b_hr=3, thr=40):
"""
Predict smell events using the trained machine learning model
For the description of the input arguments, see the docstring in the train() function
"""
p = DATA_PATH
# Set logger
logger = generateLogger(p+"log.log")
log("--------------------------------------------------------------------------", logger)
log("-------------------------------- Predict -------------------------------", logger)
# Get data for previous b_hr hours
end_dt = datetime.now()
start_dt = end_dt - timedelta(hours=b_hr+1)
log("Get data from " + str(start_dt) + " to " + str(end_dt), logger)
df_esdr_array_raw, df_smell_raw = getData(start_dt=start_dt, end_dt=end_dt, logger=logger)
df_esdr, df_smell = preprocessData(df_esdr_array_raw=df_esdr_array_raw, df_smell_raw=df_smell_raw, logger=logger)
if len(df_esdr) < b_hr+1:
log("ERROR: Length of esdr is less than " + str(b_hr+1) + " hours", logger)
log("Length of esdr = " + str(len(df_esdr)), logger)
return
# Compute features
df_X, _, df_C = computeFeatures(df_esdr=df_esdr, df_smell=df_smell, f_hr=f_hr, b_hr=b_hr, thr=thr, is_regr=False,
add_inter=False, add_roll=False, add_diff=False, logger=logger, in_p_mean=p+"mean.csv", in_p_std=p+"std.csv")
if len(df_X) != 1:
log("ERROR: Length of X is not 1", logger)
log("Length of X = " + str(len(df_X)), logger)
return
# Select features
# NOTE: currently, the best model uses all the features
#df_feat_selected = pd.read_csv(p+"feat_selected.csv")
#df_X = df_X[df_feat_selected.columns]
# Load model
log("Load model...", logger)
model = joblib.load(p+"model.pkl")
# Predict result
# For the hybrid crowd classifier
# if pred==0, no event
# if pred==1, event predicted by the base estimator
# if pred==2, event detected by the crowd
# if pred==3, event both predicted by the base estimator and detected by the crowd
y_pred = model.predict(df_X, df_C)[0]
log("Prediction for " + str(end_dt) + " is " + str(y_pred), logger)
# Send the predictive push notification
if y_pred in (1, 3): pushType1(end_dt, logger)
# Send the crowd-based notification
# NOTE: comment out the next line when migrating its function to the rails server
if y_pred in (2, 3): pushType2(end_dt, logger)
def pushType1(end_dt, logger):
"""
Send type 1 push notification (predicted by the classifier)
Input:
end_dt: the ending time for getting the ESDR data that is used for prediction (which is the current time)
logger: the python logger created by the generateLogger() function
"""
p = DATA_PATH
# Read the push notification file
nst_p = p + "notification_sent_times.csv"
if isFileHere(nst_p):
        # If the file that stores the notification sending times exists,
        # ...load the times and check if we already sent the notification on the same date before
        df_nst = pd.read_csv(nst_p, parse_dates=["DateTime"])
        last_date = df_nst["DateTime"].dt.date.iloc[-1] # the last date on which we sent the notification
current_date = end_dt.date()
if current_date == last_date: # check if the dates (only year, month, and day) match
# We already sent push notifications to users today, do not send it again until next day
log("Ignore this prediction because we already sent a push notification today", logger)
return
else:
# If the file did not exist, create a new pandas Dataframe to store the time when we send notifications
df_nst = pd.DataFrame(data=[], columns=["DateTime"])
# Send push notification to users
if ENABLE_RAKE_CALL:
# NOTE: Python by default uses the sh terminal but we want it to use bash,
# ...because "source" and "bundle" only works for bash on the Hal11 machine
# ...(on the sh terminal we will want to use "." instead of "source")
cmd = 'source /etc/profile ; cd /var/www/rails-apps/smellpgh/' + SERVER + '/current/ ; bundle exec rake firebase_push_notification:send_prediction["/topics/SmellReports"] RAILS_ENV=' + SERVER + ' >> /var/www/smell-pittsburgh-prediction/py/prediction/data_production/push.log 2>&1'
subprocess.call(["bash", "-c", cmd])
    # Record in a CSV file the time when the system sent the push notification
log("A prediction push notification was sent to users", logger)
df_nst = df_nst.append({"DateTime": end_dt}, ignore_index=True) # append a row to the pandas Dataframe
df_nst.to_csv(nst_p, index=False) # re-write the Dataframe to a CSV file
def pushType2(end_dt, logger):
"""
Send type 2 push notification (verified by the crowd)
Input:
end_dt: the ending time for getting the ESDR data that is used for prediction (which is the current time)
logger: the python logger created by the generateLogger() function
"""
p = DATA_PATH
# Read the crowd push notification file
cvnst_p = p + "crow_verified_notification_sent_times.csv"
if isFileHere(cvnst_p):
        # If the file that stores the notification sending times exists,
        # ...load the times and check if we already sent the notification on the same date before
        df_cvnst = pd.read_csv(cvnst_p, parse_dates=["DateTime"])
        last_date = df_cvnst["DateTime"].dt.date.iloc[-1] # the last date on which we sent the notification
current_date = end_dt.date()
if current_date == last_date: # check if the dates (only year, month, and day) match
# We already sent crowd-verified push notifications to users today, do not send it again until next day
log("Ignore this crowd-verified event because we already sent a push notification today", logger)
return
else:
# If the file did not exist, create a new pandas Dataframe to store the time when we send notifications
df_cvnst = pd.DataFrame(data=[], columns=["DateTime"])
# Send crowd-verified push notification to users
if ENABLE_RAKE_CALL:
# NOTE: Python by default uses the sh terminal but we want it to use bash,
# ...because "source" and "bundle" only works for bash on the Hal11 machine
# ...(on the sh terminal we will want to use "." instead of "source")
cmd = 'source /etc/profile ; cd /var/www/rails-apps/smellpgh/' + SERVER + '/current/ ; bundle exec rake firebase_push_notification:send_prediction_type2["/topics/SmellReports"] RAILS_ENV=' + SERVER + ' >> /var/www/smell-pittsburgh-prediction/py/prediction/data_production/crow_verified_push.log 2>&1'
subprocess.call(["bash", "-c", cmd])
    # Record in a CSV file the time when the system sent the push notification
log("A crowd-verified push notification was sent to users", logger)
df_cvnst = df_cvnst.append({"DateTime": end_dt}, ignore_index=True) # append a row to the pandas Dataframe
df_cvnst.to_csv(cvnst_p, index=False) # re-write the Dataframe to a CSV file
if __name__ == "__main__":
main(sys.argv)
|
CMU-CREATE-Lab/smell-pittsburgh-prediction
|
py/prediction/production.py
|
production.py
|
py
| 10,561 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "util.generateLogger",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "getData.getData",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "preprocessData.preprocessData",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "computeFeatures.computeFeatures",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "trainModel.trainModel",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "util.computeMetric",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "util.generateLogger",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "getData.getData",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "preprocessData.preprocessData",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "computeFeatures.computeFeatures",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "util.isFileHere",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "util.isFileHere",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "util.log",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 226,
"usage_type": "attribute"
}
] |
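Note: the production.py excerpt above guards against sending more than one crowd-verified push notification per day by persisting send times in a CSV file. Below is a minimal, hedged sketch of that guard pattern, rewritten with pd.concat because DataFrame.append has been removed in recent pandas releases; the names maybe_notify, csv_path, and send_notification are placeholders, not the original project's API.

import os
from datetime import datetime
import pandas as pd

def maybe_notify(csv_path, send_notification, now=None):
    # Send at most one notification per calendar day, tracked in a CSV file.
    now = now or datetime.now()
    if os.path.exists(csv_path):
        df = pd.read_csv(csv_path, parse_dates=["DateTime"])
        if len(df) and df["DateTime"].dt.date.iloc[-1] == now.date():
            return False  # already notified today
    else:
        df = pd.DataFrame(columns=["DateTime"])
    send_notification()
    df = pd.concat([df, pd.DataFrame({"DateTime": [now]})], ignore_index=True)
    df.to_csv(csv_path, index=False)
    return True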
911733982
|
import pdfplumber
import os,re
file_path = "/home/FDDC_announcements_round2_train_pdf/"
def pdf_tbl2txt(file):
    # Open the requested PDF instead of a hard-coded file name
    pdf = pdfplumber.open(file_path + file)
    for page in pdf.pages:
        tables = page.find_tables(table_settings={})
        if tables:
            # Crop the page to the bounding box of the first detected table
            page.crop(tables[0].bbox)
|
YankeeMarco/aliyun-FDDC-2018-Financial-Challenge-
|
pdf_to_text_with_table_tags.py
|
pdf_to_text_with_table_tags.py
|
py
| 322 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "pdfplumber.open",
"line_number": 6,
"usage_type": "call"
}
] |
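Note: the pdf_tbl2txt fragment above only detects tables; it never pulls their text out. A short, hedged sketch of table extraction with pdfplumber follows; "sample.pdf" is a placeholder path, not part of the original repository.

import pdfplumber

with pdfplumber.open("sample.pdf") as pdf:
    for page in pdf.pages:
        for table in page.find_tables(table_settings={}):
            rows = table.extract()  # list of rows, each a list of cell strings
            for row in rows:
                print("\t".join(cell or "" for cell in row))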
7705288630
|
import json
import torch
from transformers import GPT2Tokenizer
from transformers import GPT2DoubleHeadsModel
from MTDNN import MTDNN
from tqdm import trange, tqdm
from keras_preprocessing import sequence
import pandas as pd
import Utils
import pickle
import os
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import datetime
SPECIAL_TOKENS = ['<pad>', '<eos>', '<rstokn>', '<bos>', '<question>', '<commonsensetask>', '<cose>', '<openbook>']
ATTR_TO_SPECIAL_TOKEN = {'bos_token': '<bos>', 'pad_token': '<pad>', 'eos_token': '<eos>',
'additional_special_tokens': ['<rstokn>', '<question>', '<reply>', '<commonsensetask>', '<cose>', '<openbook>']
}
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logs_dir_tensorboard = "runs2nomcs/" + (str(current_time) + "morecheckpoint-melco-update-ros")
writer = SummaryWriter(logs_dir_tensorboard)
device = 'cuda:5'
def data_preprocess():
final_data = []
questions = []
choices = []
label = []
facts = []
file_name = 'data/OpenBookFacts/train_complete.jsonl'
for line in open(file_name, 'r') :
data = (json.loads(line))
questions.append(data['question']['stem'])
choices.append([data['question']['choices'][0]['text'], data['question']['choices'][1]['text'],
data['question']['choices'][2]['text'], data['question']['choices'][3]['text']])
if data['answerKey'] == 'A' :
answer = 0
elif data['answerKey'] == 'B' :
answer = 1
elif data['answerKey'] == 'C' :
answer = 2
else:
answer = 3
label.append(answer)
facts.append(data['fact1'])
openBook_Data = [["openBook"], questions, choices, label, facts]
final_data.append(openBook_Data)
file_name = 'data/CoS-E/cose_train_data.csv'
data = pd.read_csv(file_name)
final_data.append([["CoS-E"], data])
file_name_1 = 'data/commonsense/subtaskB_data_all-2.csv'
file_name_2 = 'data/commonsense/subtaskC-alldata.csv'
data1 = pd.read_csv(file_name_1)
data2 = pd.read_csv(file_name_2)
data = data1.merge(data2, on='FalseSent').dropna()
final_data.append([["commonsense"], data]) # leave the last 500
return final_data
def convert_to_tokens(input, tokenizer):
if isinstance(input, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(input))
elif isinstance(input, list):
return [
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(val))
if not isinstance(val, int) else val
for val in input
]
elif isinstance(input, pd.Series):
input = input.tolist()
return [
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(val))
if not isinstance(val, int) else val
for val in input
]
else:
import sys
print("Conversion Error")
sys.exit()
def padding_falsesent_choices(datas, tokenizer):
pad = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[0])]
eos = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[1])]
rstokn = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[2])]
bos = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[3])]
questons = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[4])]
commonsensetask = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[5])]
COSE = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[6])]
openBook = [tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[7])]
choice_padding = -1
input_ids = []
lm_labels = []
token_type_ids = []
mc_token_ids = []
mc_labels = []
max_length = 128
for data in datas:
if data[0] == ["openBook"]:
for question, choices, labels, facts in zip( data[1], data[2], data[3], data[4]):
# /mc_labels = []
question, choices, facts = convert_to_tokens(question, tokenizer), convert_to_tokens(choices, tokenizer), convert_to_tokens(facts, tokenizer)
input1 = [bos + openBook + rstokn + question + rstokn + choices[0] + rstokn + choices[1] + rstokn + choices[2] + rstokn + choices[3] + eos]
input2 = [bos + openBook + rstokn + question + rstokn + facts + eos]
mc_token_ids.append(len(input1[0]))
mc_token_ids.append(len(input2[0]))
input1 = sequence.pad_sequences(input1, maxlen=max_length, padding='post', value=pad)
input_ids.append(input1[0])
fakechoice = sequence.pad_sequences([[-1]], maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(fakechoice[0])
tt_id1 = [
(len(openBook) + 1) * rstokn + (len(question) + 1) * questons + (len(choices[0]) + 1) * rstokn +
(len(choices[1]) + 1) * questons + (len(choices[2]) + 1) * rstokn + (len(choices[3]) + 2) * questons
]
tt_id1 = sequence.pad_sequences(tt_id1, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id1[0])
input2 = sequence.pad_sequences(input2, maxlen=max_length, padding='post', value=pad)
input_ids.append(input2[0])
choice = [[-1] * (len(openBook) + 2) + [-1] * len(question) + [-1] + facts + eos]
choice = sequence.pad_sequences(choice, maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(choice[0])
tt_id2 = [(len(openBook) + 1) * rstokn + (len(question) + 1) * questons + (len(choices[labels]) + 2) * rstokn]
tt_id2 = sequence.pad_sequences(tt_id2, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id2[0])
mc_labels.append(labels)
mc_labels.append(labels)
elif data[0] == ["CoS-E"]:
for idx, value in data[1].iterrows():
value = value[1:]
value = convert_to_tokens(value, tokenizer)
input1 = [bos + COSE + rstokn + value[1] + rstokn + value[2] + rstokn + value[3] + rstokn + value[4] +
rstokn + value[5] + rstokn + value[6] + eos]
input2 = [bos + COSE + rstokn + value[1] + rstokn + value[8] + eos]
mc_token_ids.append(len(input1[0]))
mc_token_ids.append(len(input2[0]))
input1 = sequence.pad_sequences(input1, maxlen= max_length, padding='post', value=pad)
input_ids.append(input1[0])
fakechoice = sequence.pad_sequences([[-1]], maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(fakechoice[0])
tt_id1 = [(len(COSE) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[2]) + 1) * rstokn +
(len(value[3]) + 1) * questons + (len(value[4]) + 1) * rstokn + (len(value[5]) + 1) * questons +
(len(value[6]) + 2) * rstokn]
tt_id1 = sequence.pad_sequences(tt_id1, maxlen= max_length, padding='post', value=pad)
token_type_ids.append(tt_id1[0])
input2 = sequence.pad_sequences(input2, maxlen=max_length, padding='post', value=pad)
input_ids.append(input2[0])
choice = [[-1] * (len(COSE) + 2) + [-1] * len(value[1]) + [-1] + value[8] + eos]
choice = sequence.pad_sequences(choice, maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(choice[0])
tt_id2 = [(len(COSE) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[8]) +2) * rstokn]
tt_id2 = sequence.pad_sequences(tt_id2, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id2[0])
mc_labels.append(value[7])
mc_labels.append(value[7])
elif data[0] == ["commonsense"]:
for idx, value in data[1].iterrows():
# call tokenizer
value = convert_to_tokens(value, tokenizer)
input1 = [bos + commonsensetask + rstokn + value[1] + rstokn + value[2] + rstokn + value[3] + rstokn + value[4]+ eos]
ml = input1
input1 = sequence.pad_sequences(input1, maxlen=max_length, padding='post', value=pad)
fakechoice = sequence.pad_sequences([[-1]], maxlen=max_length, padding='post', value=choice_padding)
tt_id1 = [
(len(commonsensetask) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[2]) + 1) * rstokn +
(len(value[3]) + 1) * questons + (len(value[4]) + 2) * rstokn
]
tt_id1 = sequence.pad_sequences(tt_id1, maxlen=max_length, padding='post', value=pad)
for i in range(3):
mc_token_ids.append(len(ml))
input_ids.append(input1[0])
lm_labels.append(fakechoice[0])
token_type_ids.append(tt_id1[0])
input2 = [bos + commonsensetask + rstokn + value[1] + rstokn + value[7 + i] + eos]
mc_token_ids.append(len(input2[0]))
input2 = sequence.pad_sequences(input2, maxlen=max_length, padding='post', value=pad)
input_ids.append(input2[0])
choice = [[-1] + [-1] * len(commonsensetask) + [-1] + [-1] * len(value[1]) + [-1] + value[7 + i] + eos]
choice = sequence.pad_sequences(choice, maxlen=max_length, padding='post', value=choice_padding)
lm_labels.append(choice[0])
tt_id2 = [(len(commonsensetask) + 1) * rstokn + (len(value[1]) + 1) * questons + (len(value[7 + i]) + 2) * rstokn]
tt_id2 = sequence.pad_sequences(tt_id2, maxlen=max_length, padding='post', value=pad)
token_type_ids.append(tt_id2[0])
mc_labels.append(value[5])
mc_labels.append(value[5])
# mc_labels.append(0)
return input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
def converting_tokens(data, tokenizer):
print("Converting tokens to ids ...", flush=True)
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = padding_falsesent_choices(data, tokenizer)
input_ids = torch.tensor(input_ids)
input_ids = input_ids.view((-1, 2) + input_ids.shape[1:])
mc_token_ids = torch.tensor(mc_token_ids)
mc_token_ids = mc_token_ids.view((-1, 2) + mc_token_ids.shape[1:])
lm_labels = torch.tensor(lm_labels)
lm_labels = lm_labels.view((-1, 2) + lm_labels.shape[1:])
token_type_ids = torch.tensor(token_type_ids)
token_type_ids = token_type_ids.view((-1, 2) + token_type_ids.shape[1:])
mc_labels = torch.tensor(mc_labels)
mc_labels = mc_labels.view((-1, 2) + mc_labels.shape[1:])
pickle.dump(input_ids, open("data/pickle/input_ids.p", "wb"))
pickle.dump(mc_token_ids, open("data/pickle/mc_token_ids.p", "wb"))
pickle.dump(lm_labels, open("data/pickle/lm_labels.p", "wb"))
pickle.dump(token_type_ids, open("data/pickle/token_type_ids.p", "wb"))
pickle.dump(mc_labels, open("data/pickle/mc_labels.p", "wb"))
return input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
def train(model, optimizer, scheduler, train_data, output_dir, num_train_epochs, tokenizer, lm_coef, mc_coef,gradient_accumulation_steps, mgpu, temp=[], valid_data = []):
training_loss = {}
evaluation_loss = {}
global_steps = 0
for epochs in range(num_train_epochs):
model.train()
print("Training start for epoch {}".format(epochs), flush=True)
nb_tr_steps, tr_loss = 0, 0
optimizer.zero_grad()
lm_sub_batch_loss, mc_sub_batch_loss, sub_batch_loss = 0, 0, 0
print("sub_batch_loss \t lm_sub_batch_loss \t mc_sub_batch_loss")
for step, batch in (enumerate(train_data)):
model.train()
batch = tuple(t.to(device).type(torch.cuda.LongTensor) for t in batch)
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = batch
lm_loss, mc_loss, *_ = model(
input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,
mc_labels=mc_labels, lm_labels=lm_labels, task=input_ids[0][0][1]
)
mc_loss = mc_loss[0]
del input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
loss = (lm_loss * lm_coef) + (mc_loss * mc_coef)
loss = loss.mean()
loss /= gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
lm_sub_batch_loss += lm_loss.item()
mc_sub_batch_loss += mc_loss.item()
sub_batch_loss += loss.item()
if (global_steps + 1) % gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
# global_steps +=1
optimizer.zero_grad()
print("{} \t {} \t {}".format(sub_batch_loss, lm_sub_batch_loss/gradient_accumulation_steps, mc_sub_batch_loss/gradient_accumulation_steps))
writer.add_scalar('Training batch loss', sub_batch_loss, global_steps+1)
writer.add_scalar('Training lm batch loss', lm_sub_batch_loss/gradient_accumulation_steps, global_steps+1)
writer.add_scalar('Training mc batch loss', mc_sub_batch_loss/gradient_accumulation_steps, global_steps+1)
training_loss[(global_steps+1)] = (sub_batch_loss, lm_sub_batch_loss/gradient_accumulation_steps, mc_sub_batch_loss/gradient_accumulation_steps)
lm_sub_batch_loss, mc_sub_batch_loss, sub_batch_loss = 0, 0, 0
if (global_steps + 1) % 800 == 0:
eval_loss, eval_lm_loss, eval_mc_loss = evaluate_gpt2(model, valid_data)
print("{} \t {} \t {}".format(eval_mc_loss, eval_lm_loss, eval_mc_loss))
writer.add_scalar('Eval total loss - 100', eval_loss, (global_steps + 1))
writer.add_scalar('Eval total LM loss - 100', eval_lm_loss, (global_steps + 1))
writer.add_scalar('Eval total MC loss - 100', eval_mc_loss, (global_steps + 1))
evaluation_loss[(global_steps + 1)] = (eval_loss, eval_lm_loss, eval_mc_loss)
if not os.path.exists(output_dir + '/' + str(global_steps + 1)):
os.makedirs(output_dir + '/' + str(global_steps + 1))
torch.save(model, output_dir + '/' + str(global_steps + 1) + '/' + str(global_steps + 1) + '.pt')
# model.save_state_dict(output_dir + '/' + str(global_steps + 1))
global_steps += 1
print("Epoch Completed at Step Size {}".format(global_steps))
if not os.path.exists(output_dir + '/' + '_epoch_' + str(epochs)):
os.makedirs(output_dir + '/' + '_epoch_' + str(epochs))
torch.save(model, output_dir + '/' + '_epoch_' + str(epochs) + '/' + str(epochs) + '.pt')
# model.save_state_dict(output_dir + '/' + '_epoch_' + str(epochs))
pickle.dump(training_loss, open("data/pickle/training_loss-melco-update.p", "wb"))
pickle.dump(evaluation_loss, open("data/pickle/evaluation_loss-melco-update.p", "wb"))
return model
def evaluate_gpt2(model, valid_data):
lm_sub_batch_loss, mc_sub_batch_loss = 0, 0
model.eval()
print("\n *************************Evaluation************************************ \n")
for step, batch in (enumerate(valid_data)):
batch = tuple(t.to(device).type(torch.cuda.LongTensor) for t in batch)
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = batch
lm_loss, mc_loss, *_ = model(
input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,
mc_labels=mc_labels, lm_labels=lm_labels, task=input_ids[0][0][1]
)
del input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels
lm_sub_batch_loss += lm_loss.item()
mc_sub_batch_loss += mc_loss[0].item()
return (lm_sub_batch_loss + mc_sub_batch_loss)/len(valid_data), (lm_sub_batch_loss)/len(valid_data), (mc_sub_batch_loss)/len(valid_data)
def main():
flag = True
mgpu = True
output_dir= 'checkpoints-More-melco-new'
epochs = 3
gradient_accumulation_steps = 8
lm_coef, mc_coef = 1, 0
token_class = GPT2Tokenizer
model_Class = MTDNN
gpt_model = model_Class.from_pretrained('omcs/-Final')
# gpt_model = model_Class.from_pretrained('gpt2-large')
gpt_tokenizer = token_class.from_pretrained('omcs/-Final', do_lower_case=True)
# gpt_tokenizer = token_class.from_pretrained('gpt2-large', do_lower_case=True)
gpt_model, gpt_tokenizer = Utils.add_special_tokens(gpt_model, gpt_tokenizer, ATTR_TO_SPECIAL_TOKEN)
gpt_model.to(device)
#gpt_model = torch.nn.DataParallel(gpt_model, output_device=1, device_ids=[0, 1])
cache_input_ids, cache_mc_token_ids, cache_lm_labels, cache_token_type_ids, cache_mc_labels = \
"data/pickle/input_ids.p", "data/pickle/mc_token_ids.p", "data/pickle/lm_labels.p", "data/pickle/token_type_ids.p", "data/pickle/mc_labels.p"
if flag and os.path.exists(cache_input_ids) and os.path.exists(cache_mc_token_ids) and os.path.exists(
cache_lm_labels) and os.path.exists(cache_token_type_ids) and os.path.exists(cache_mc_labels):
print("Token ids loaded from previous processed file ... ", flush=True)
input_ids, mc_token_ids, lm_labels, token_type_ids, mc_labels = pickle.load(open(cache_input_ids, "rb")), pickle.load(open(cache_mc_token_ids, "rb")), \
pickle.load(open(cache_lm_labels, "rb")), pickle.load(open(cache_token_type_ids, "rb")), \
pickle.load(open(cache_mc_labels, "rb"))
else:
data = data_preprocess()
input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels = converting_tokens(data, gpt_tokenizer)
temp = [input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels]
train_data, valid_data = Utils.build_dataloader((input_ids, token_type_ids, mc_token_ids, lm_labels, mc_labels))
train_data, valid_data = Utils.generate_batch(train_data, valid_data, 1)
t_total = len(train_data) / epochs
learning_rate, adam_epsilon, weight_decay, warmup_steps = 1e-5, 1e-8, 0, 0
optimizer, scheduler = Utils.optimizer_generater(gpt_model, learning_rate, adam_epsilon, weight_decay, warmup_steps, t_total)
model = train(gpt_model, optimizer, scheduler, train_data, output_dir, epochs, gpt_tokenizer, lm_coef, mc_coef, gradient_accumulation_steps,
mgpu, temp, valid_data)
print("End of execution", flush=True)
output_dir = output_dir + '/' + 'final'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
gpt_tokenizer.save_pretrained(output_dir)
if __name__ == '__main__':
main()
|
anandhperumal/ANA-at-SemEval-2020-Task-4-UNION
|
MTD-NCH.py
|
MTD-NCH.py
|
py
| 19,668 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "keras_preprocessing.sequence.pad_sequences",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "keras_preprocessing.sequence",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "transformers.GPT2Tokenizer",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "MTDNN.MTDNN",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "Utils.add_special_tokens",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "Utils.build_dataloader",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "Utils.generate_batch",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "Utils.optimizer_generater",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 396,
"usage_type": "call"
}
] |
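Note: the multi-task training script above pads every encoded sequence to a fixed length and then reshapes the flat tensor so that every two consecutive rows form one (question, answer) pair. A minimal, hedged sketch of that pad-then-pair step is shown below; the token ids are made up for illustration.

import torch
from keras_preprocessing import sequence

max_length = 8
rows = [[5, 6, 7], [5, 6, 9, 9], [1, 2], [1, 2, 3, 4, 5]]  # two examples, two rows each
padded = sequence.pad_sequences(rows, maxlen=max_length, padding='post', value=0)
input_ids = torch.tensor(padded)
input_ids = input_ids.view((-1, 2) + input_ids.shape[1:])  # shape: (2, 2, 8)
print(input_ids.shape)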
34042438403
|
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from gcloud.core.apis.drf.exceptions import ValidationException
from gcloud.core.apis.drf.viewsets import ApiMixin, permissions
from gcloud.label.models import Label, TemplateLabelRelation
from gcloud.label.serilaziers import LabelSerializer
from gcloud.iam_auth import IAMMeta, get_iam_client, res_factory
from iam.contrib.drf.shortcuts import allow_or_raise_immediate_response
from iam import Subject, Action
iam = get_iam_client()
class LabelViewSet(ApiMixin, ModelViewSet):
queryset = Label.objects.all()
serializer_class = LabelSerializer
permission_classes = [permissions.IsAuthenticated]
filter_backends = [DjangoFilterBackend]
filterset_fields = "__all__"
def list(self, request, *args, **kwargs):
project_id = request.query_params.get("project_id")
if not project_id:
raise ValidationException("project_id should be provided.")
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_VIEW_ACTION),
resources=res_factory.resources_for_project(project_id),
)
return super(LabelViewSet, self).list(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
label = self.get_object()
if label.is_default:
raise ValidationException("default label cannot be updated.")
project_id = label.project_id
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_EDIT_ACTION),
resources=res_factory.resources_for_project(project_id),
)
return super(LabelViewSet, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
label = self.get_object()
if label.is_default:
raise ValidationException("default label cannot be deleted.")
project_id = label.project_id
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_EDIT_ACTION),
resources=res_factory.resources_for_project(project_id),
)
self.perform_destroy(label)
return Response({"result": True, "message": "success"})
@action(methods=["get"], detail=False)
def list_with_default_labels(self, request, *args, **kwargs):
project_id = request.query_params.get("project_id")
if not project_id:
raise ValidationException("project_id should be provided.")
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_VIEW_ACTION),
resources=res_factory.resources_for_project(project_id),
)
queryset = Label.objects.filter(Q(project_id=project_id) | Q(is_default=True))
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
@action(methods=["get"], detail=False)
def get_templates_labels(self, request):
return self._fetch_label_or_template_ids(request, fetch_label=True)
@action(methods=["get"], detail=False)
def get_label_template_ids(self, request):
return self._fetch_label_or_template_ids(request, fetch_label=False)
@staticmethod
def _fetch_label_or_template_ids(request, fetch_label):
base_id_name = "template_ids" if fetch_label else "label_ids"
if fetch_label:
fetch_func = TemplateLabelRelation.objects.fetch_templates_labels
else:
fetch_func = TemplateLabelRelation.objects.fetch_label_template_ids
base_ids = request.query_params.get(base_id_name)
if not base_ids:
raise ValidationException("{} must be provided.".format(base_id_name))
project_id = request.query_params.get("project_id")
allow_or_raise_immediate_response(
iam=iam,
system=IAMMeta.SYSTEM_ID,
subject=Subject("user", request.user.username),
action=Action(IAMMeta.PROJECT_VIEW_ACTION),
resources=res_factory.resources_for_project(project_id),
)
base_ids = [int(base_id) for base_id in base_ids.strip().split(",")]
return Response(fetch_func(base_ids))
|
caiyj/bk-sops
|
gcloud/label/viewsets.py
|
viewsets.py
|
py
| 4,792 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "iam.contrib.drf.shortcuts",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.get_iam_client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gcloud.core.apis.drf.viewsets.ApiMixin",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "gcloud.label.models.Label.objects.all",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "gcloud.label.models.Label.objects",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "gcloud.label.models.Label",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "gcloud.label.serilaziers.LabelSerializer",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "gcloud.core.apis.drf.viewsets.permissions.IsAuthenticated",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "gcloud.core.apis.drf.viewsets.permissions",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django_filters.rest_framework.DjangoFilterBackend",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "gcloud.core.apis.drf.exceptions.ValidationException",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts.allow_or_raise_immediate_response",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.SYSTEM_ID",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "iam.Subject",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "iam.Action",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.PROJECT_VIEW_ACTION",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.res_factory.resources_for_project",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.res_factory",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "gcloud.core.apis.drf.exceptions.ValidationException",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts.allow_or_raise_immediate_response",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.SYSTEM_ID",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "iam.Subject",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "iam.Action",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.PROJECT_EDIT_ACTION",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.res_factory.resources_for_project",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.res_factory",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "gcloud.core.apis.drf.exceptions.ValidationException",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts.allow_or_raise_immediate_response",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.SYSTEM_ID",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "iam.Subject",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "iam.Action",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.PROJECT_EDIT_ACTION",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.res_factory.resources_for_project",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.res_factory",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "gcloud.core.apis.drf.exceptions.ValidationException",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts.allow_or_raise_immediate_response",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.SYSTEM_ID",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "iam.Subject",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "iam.Action",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.PROJECT_VIEW_ACTION",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.res_factory.resources_for_project",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.res_factory",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "gcloud.label.models.Label.objects.filter",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "gcloud.label.models.Label.objects",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "gcloud.label.models.Label",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.action",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.action",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "rest_framework.decorators.action",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "gcloud.label.models.TemplateLabelRelation.objects",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "gcloud.label.models.TemplateLabelRelation",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "gcloud.label.models.TemplateLabelRelation.objects",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "gcloud.label.models.TemplateLabelRelation",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "gcloud.core.apis.drf.exceptions.ValidationException",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts.allow_or_raise_immediate_response",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "iam.contrib.drf.shortcuts",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.SYSTEM_ID",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "iam.Subject",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "iam.Action",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.IAMMeta.PROJECT_VIEW_ACTION",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "gcloud.iam_auth.IAMMeta",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "gcloud.iam_auth.res_factory.resources_for_project",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "gcloud.iam_auth.res_factory",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 110,
"usage_type": "call"
}
] |
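Note: every action in the LabelViewSet above repeats the same IAM permission check. A hedged sketch of factoring that repetition into one helper follows; it reuses only names already imported in the file (iam, IAMMeta, Subject, Action, res_factory, allow_or_raise_immediate_response) and the helper itself is illustrative, not part of the bk-sops codebase.

def check_project_permission(request, project_id, action_name):
    # Raise an immediate IAM response if the user lacks the given project action.
    allow_or_raise_immediate_response(
        iam=iam,
        system=IAMMeta.SYSTEM_ID,
        subject=Subject("user", request.user.username),
        action=Action(action_name),
        resources=res_factory.resources_for_project(project_id),
    )

# Usage inside an action:
#   check_project_permission(request, project_id, IAMMeta.PROJECT_VIEW_ACTION)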
31011028990
|
"""
Name: Timothy James Duffy, Kevin Falconett
File: metrics.py
Class: CSc 483; Spring 2023
Project: TextSummarizer
Provides methods to calculate the ROUGE metric and print the results.
"""
# Filter tensorflow warnings.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from database import *
from rouge import Rouge
from config import DATABASE_NAME
from summarizer import generate_summary
def get_rouge_scores(num_docs, offset):
"""Gets the rogue scores using documents in the database. Documents can be offset by id."""
# Get articles and summaries from the database.
database = Database(DATABASE_NAME)
articles, summaries = zip(*database.get_data(num_docs, offset))
# Holds the generated summaries.
generated_summaries = []
# Generate summaries for the articles.
for i, article in enumerate(articles):
summary = generate_summary(article)
generated_summaries.append(summary)
print('Actual summary:\n{}'.format(summaries[i]))
print('Generated summary:\n{}\n'.format(generated_summaries[i]))
# Calculate ROUGE scores for the sample set.
rouge = Rouge()
scores = rouge.get_scores(generated_summaries, list(summaries), avg=True)
return scores
def print_scores(scores):
"""Prints the given ROUGE scores in a nice format."""
# Get ROUGE dictionaries. Each contains recall, precision, accuracy scores.
r1 = scores['rouge-1']
r2 = scores['rouge-2']
rl = scores['rouge-l']
# Print out the scores for Rouge-1, Rouge-2, and Rouge-l.
print('Rouge-1\trecall:\t{:.2f}\tprecision:\t{:.2f}\tf1_score:\t{:.2f}'.format(r1['r'], r1['p'], r1['f']))
print('Rouge-2\trecall:\t{:.2f}\tprecision:\t{:.2f}\tf1_score:\t{:.2f}'.format(r2['r'], r2['p'], r2['f']))
print('Rouge-l\trecall:\t{:.2f}\tprecision:\t{:.2f}\tf1_score:\t{:.2f}\n'.format(rl['r'], rl['p'], rl['f']))
def main():
# Prints the ROUGE results for data the model has been trained on.
print('Trained Data ROUGE Scores:')
trained_data = get_rouge_scores(1, 0)
print_scores(trained_data)
# Prints the ROUGE results for data the model has NOT been trained on.
print('Untrained Data ROUGE Scores:')
untrained_data = get_rouge_scores(1, 1200)
print_scores(untrained_data)
if __name__ == '__main__':
main()
|
tjdaz/TextSummarizer
|
metrics.py
|
metrics.py
|
py
| 2,310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "config.DATABASE_NAME",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "database.get_data",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "summarizer.generate_summary",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rouge.Rouge",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rouge.get_scores",
"line_number": 37,
"usage_type": "call"
}
] |
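Note: metrics.py above averages ROUGE-1, ROUGE-2, and ROUGE-L over generated and reference summaries pulled from a database. A minimal, hedged sketch of the same call with the rouge package follows, using made-up hypothesis and reference strings instead of database rows.

from rouge import Rouge

hyps = ["the cat sat on the mat"]
refs = ["a cat was sitting on the mat"]
scores = Rouge().get_scores(hyps, refs, avg=True)
print(scores["rouge-1"], scores["rouge-2"], scores["rouge-l"])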
26113055515
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "28/06/2018"
import logging
import numpy
import weakref
import functools
from typing import Optional
from ....utils.weakref import WeakList
from ... import qt
from .. import items
from ..items import core
from ...colors import rgba
logger = logging.getLogger(__name__)
class _RegionOfInterestBase(qt.QObject):
"""Base class of 1D and 2D region of interest
:param QObject parent: See QObject
:param str name: The name of the ROI
"""
sigAboutToBeRemoved = qt.Signal()
"""Signal emitted just before this ROI is removed from its manager."""
sigItemChanged = qt.Signal(object)
"""Signal emitted when item has changed.
It provides a flag describing which property of the item has changed.
See :class:`ItemChangedType` for flags description.
"""
def __init__(self, parent=None):
qt.QObject.__init__(self)
if parent is not None:
self.setParent(parent)
self.__name = ''
def getName(self):
"""Returns the name of the ROI
:return: name of the region of interest
:rtype: str
"""
return self.__name
def setName(self, name):
"""Set the name of the ROI
:param str name: name of the region of interest
"""
name = str(name)
if self.__name != name:
self.__name = name
self._updated(items.ItemChangedType.NAME)
def _updated(self, event=None, checkVisibility=True):
"""Implement Item mix-in update method by updating the plot items
See :class:`~silx.gui.plot.items.Item._updated`
"""
self.sigItemChanged.emit(event)
def contains(self, position):
"""Returns True if the `position` is in this ROI.
:param tuple[float,float] position: position to check
:return: True if the value / point is considered to be in the region of
interest.
:rtype: bool
"""
return False # Override in subclass to perform actual test
class RoiInteractionMode(object):
"""Description of an interaction mode.
An interaction mode provides a specific kind of interaction for a ROI.
A ROI can implement many interactions.
"""
def __init__(self, label, description=None):
self._label = label
self._description = description
@property
def label(self):
"""Short name"""
return self._label
@property
def description(self):
"""Longer description of the interaction mode"""
return self._description
class InteractionModeMixIn(object):
"""Mix in feature which can be implemented by a ROI object.
This provides user interaction to switch between different
interaction mode to edit the ROI.
This ROI modes have to be described using `RoiInteractionMode`,
and taken into account during interation with handles.
"""
sigInteractionModeChanged = qt.Signal(object)
def __init__(self):
self.__modeId = None
def _initInteractionMode(self, modeId):
"""Set the mode without updating anything.
Must be one of the returned :meth:`availableInteractionModes`.
:param RoiInteractionMode modeId: Mode to use
"""
self.__modeId = modeId
def availableInteractionModes(self):
"""Returns the list of available interaction modes
Must be implemented when inherited to provide all available modes.
:rtype: List[RoiInteractionMode]
"""
raise NotImplementedError()
def setInteractionMode(self, modeId):
"""Set the interaction mode.
:param RoiInteractionMode modeId: Mode to use
"""
self.__modeId = modeId
self._interactiveModeUpdated(modeId)
self.sigInteractionModeChanged.emit(modeId)
def _interactiveModeUpdated(self, modeId):
"""Called directly after an update of the mode.
The signal `sigInteractionModeChanged` is triggered after this
call.
Must be implemented when inherited to take care of the change.
"""
raise NotImplementedError()
def getInteractionMode(self):
"""Returns the interaction mode.
Must be one of the returned :meth:`availableInteractionModes`.
:rtype: RoiInteractionMode
"""
return self.__modeId
def createMenuForInteractionMode(self, parent: qt.QWidget) -> qt.QMenu:
"""Create a menu providing access to the different interaction modes"""
availableModes = self.availableInteractionModes()
currentMode = self.getInteractionMode()
submenu = qt.QMenu(parent)
modeGroup = qt.QActionGroup(parent)
modeGroup.setExclusive(True)
for mode in availableModes:
action = qt.QAction(parent)
action.setText(mode.label)
action.setToolTip(mode.description)
action.setCheckable(True)
if mode is currentMode:
action.setChecked(True)
else:
callback = functools.partial(self.setInteractionMode, mode)
action.triggered.connect(callback)
modeGroup.addAction(action)
submenu.addAction(action)
submenu.setTitle("Interaction mode")
return submenu
class RegionOfInterest(_RegionOfInterestBase, core.HighlightedMixIn):
"""Object describing a region of interest in a plot.
:param QObject parent:
The RegionOfInterestManager that created this object
"""
_DEFAULT_LINEWIDTH = 1.
"""Default line width of the curve"""
_DEFAULT_LINESTYLE = '-'
"""Default line style of the curve"""
_DEFAULT_HIGHLIGHT_STYLE = items.CurveStyle(linewidth=2)
"""Default highlight style of the item"""
ICON, NAME, SHORT_NAME = None, None, None
"""Metadata to describe the ROI in labels, tooltips and widgets
Should be set by inherited classes to custom the ROI manager widget.
"""
sigRegionChanged = qt.Signal()
"""Signal emitted everytime the shape or position of the ROI changes"""
sigEditingStarted = qt.Signal()
"""Signal emitted when the user start editing the roi"""
sigEditingFinished = qt.Signal()
"""Signal emitted when the region edition is finished. During edition
sigEditionChanged will be emitted several times and
sigRegionEditionFinished only at end"""
def __init__(self, parent=None):
# Avoid circular dependency
from ..tools import roi as roi_tools
assert parent is None or isinstance(parent, roi_tools.RegionOfInterestManager)
_RegionOfInterestBase.__init__(self, parent)
core.HighlightedMixIn.__init__(self)
self.__text = None
self._color = rgba('red')
self._editable = False
self._selectable = False
self._focusProxy = None
self._visible = True
self._child = WeakList()
def _connectToPlot(self, plot):
"""Called after connection to a plot"""
for item in self.getItems():
# This hack is needed to avoid reentrant call from _disconnectFromPlot
# to the ROI manager. It also speed up the item tests in _itemRemoved
item._roiGroup = True
plot.addItem(item)
def _disconnectFromPlot(self, plot):
"""Called before disconnection from a plot"""
for item in self.getItems():
# The item could be already be removed by the plot
if item.getPlot() is not None:
del item._roiGroup
plot.removeItem(item)
def _setItemName(self, item):
"""Helper to generate a unique id to a plot item"""
legend = "__ROI-%d__%d" % (id(self), id(item))
item.setName(legend)
def setParent(self, parent):
"""Set the parent of the RegionOfInterest
:param Union[None,RegionOfInterestManager] parent: The new parent
"""
# Avoid circular dependency
from ..tools import roi as roi_tools
if (parent is not None and not isinstance(parent, roi_tools.RegionOfInterestManager)):
raise ValueError('Unsupported parent')
previousParent = self.parent()
if previousParent is not None:
previousPlot = previousParent.parent()
if previousPlot is not None:
self._disconnectFromPlot(previousPlot)
super(RegionOfInterest, self).setParent(parent)
if parent is not None:
plot = parent.parent()
if plot is not None:
self._connectToPlot(plot)
def addItem(self, item):
"""Add an item to the set of this ROI children.
This item will be added and removed to the plot used by the ROI.
If the ROI is already part of a plot, the item will also be added to
the plot.
If the item does not have a name already, a unique one is generated to
avoid item collision in the plot.
:param silx.gui.plot.items.Item item: A plot item
"""
assert item is not None
self._child.append(item)
if item.getName() == '':
self._setItemName(item)
manager = self.parent()
if manager is not None:
plot = manager.parent()
if plot is not None:
item._roiGroup = True
plot.addItem(item)
def removeItem(self, item):
"""Remove an item from this ROI children.
If the item is part of a plot it will be removed too.
:param silx.gui.plot.items.Item item: A plot item
"""
assert item is not None
self._child.remove(item)
plot = item.getPlot()
if plot is not None:
del item._roiGroup
plot.removeItem(item)
def getItems(self):
"""Returns the list of PlotWidget items of this RegionOfInterest.
:rtype: List[~silx.gui.plot.items.Item]
"""
return tuple(self._child)
@classmethod
def _getShortName(cls):
"""Return an human readable kind of ROI
:rtype: str
"""
if hasattr(cls, "SHORT_NAME"):
name = cls.SHORT_NAME
if name is None:
name = cls.__name__
return name
def getColor(self):
"""Returns the color of this ROI
:rtype: QColor
"""
return qt.QColor.fromRgbF(*self._color)
def setColor(self, color):
"""Set the color used for this ROI.
:param color: The color to use for ROI shape as
either a color name, a QColor, a list of uint8 or float in [0, 1].
"""
color = rgba(color)
if color != self._color:
self._color = color
self._updated(items.ItemChangedType.COLOR)
def isEditable(self):
"""Returns whether the ROI is editable by the user or not.
:rtype: bool
"""
return self._editable
def setEditable(self, editable):
"""Set whether the ROI can be changed interactively.
:param bool editable: True to allow edition by the user,
False to disable.
"""
editable = bool(editable)
if self._editable != editable:
self._editable = editable
self._updated(items.ItemChangedType.EDITABLE)
def isSelectable(self):
"""Returns whether the ROI is selectable by the user or not.
:rtype: bool
"""
return self._selectable
def setSelectable(self, selectable):
"""Set whether the ROI can be selected interactively.
:param bool selectable: True to allow selection by the user,
False to disable.
"""
selectable = bool(selectable)
if self._selectable != selectable:
self._selectable = selectable
self._updated(items.ItemChangedType.SELECTABLE)
def getFocusProxy(self):
"""Returns the ROI which have to be selected when this ROI is selected,
else None if no proxy specified.
:rtype: RegionOfInterest
"""
proxy = self._focusProxy
if proxy is None:
return None
proxy = proxy()
if proxy is None:
self._focusProxy = None
return proxy
def setFocusProxy(self, roi):
"""Set the real ROI which will be selected when this ROI is selected,
else None to remove the proxy already specified.
:param RegionOfInterest roi: A ROI
"""
if roi is not None:
self._focusProxy = weakref.ref(roi)
else:
self._focusProxy = None
def isVisible(self):
"""Returns whether the ROI is visible in the plot.
.. note::
This does not take into account whether or not the plot
widget itself is visible (unlike :meth:`QWidget.isVisible` which
checks the visibility of all its parent widgets up to the window)
:rtype: bool
"""
return self._visible
def setVisible(self, visible):
"""Set whether the plot items associated with this ROI are
visible in the plot.
:param bool visible: True to show the ROI in the plot, False to
hide it.
"""
visible = bool(visible)
if self._visible != visible:
self._visible = visible
self._updated(items.ItemChangedType.VISIBLE)
def getText(self) -> str:
"""Returns the currently displayed text for this ROI"""
return self.getName() if self.__text is None else self.__text
def setText(self, text: Optional[str] = None) -> None:
"""Set the displayed text for this ROI.
If None (the default), the ROI name is used.
"""
if self.__text != text:
self.__text = text
self._updated(items.ItemChangedType.TEXT)
def _updateText(self, text: str) -> None:
"""Update the text displayed by this ROI
Override in subclass to custom text display
"""
pass
@classmethod
def showFirstInteractionShape(cls):
"""Returns True if the shape created by the first interaction and
managed by the plot have to be visible.
:rtype: bool
"""
return False
@classmethod
def getFirstInteractionShape(cls):
"""Returns the shape kind which will be used by the very first
interaction with the plot.
These interactions are hardcoded inside the plot
:rtype: str
"""
return cls._plotShape
def setFirstShapePoints(self, points):
"""Initialize the ROI using the points from the first interaction.
This interaction is constrained by the plot API and only supports a few
shapes.
"""
raise NotImplementedError()
def creationStarted(self):
"""Called when the ROI creation interaction was started.
"""
pass
def creationFinalized(self):
"""Called when the ROI creation interaction was finalized.
"""
pass
def _updateItemProperty(self, event, source, destination):
"""Update the item property of a destination from an item source.
:param items.ItemChangedType event: Property type to update
:param silx.gui.plot.items.Item source: The reference for the data
:param event Union[Item,List[Item]] destination: The item(s) to update
"""
if not isinstance(destination, (list, tuple)):
destination = [destination]
if event == items.ItemChangedType.NAME:
value = source.getName()
for d in destination:
d.setName(value)
elif event == items.ItemChangedType.EDITABLE:
value = source.isEditable()
for d in destination:
d.setEditable(value)
elif event == items.ItemChangedType.SELECTABLE:
value = source.isSelectable()
for d in destination:
d._setSelectable(value)
elif event == items.ItemChangedType.COLOR:
value = rgba(source.getColor())
for d in destination:
d.setColor(value)
elif event == items.ItemChangedType.LINE_STYLE:
value = self.getLineStyle()
for d in destination:
d.setLineStyle(value)
elif event == items.ItemChangedType.LINE_WIDTH:
value = self.getLineWidth()
for d in destination:
d.setLineWidth(value)
elif event == items.ItemChangedType.SYMBOL:
value = self.getSymbol()
for d in destination:
d.setSymbol(value)
elif event == items.ItemChangedType.SYMBOL_SIZE:
value = self.getSymbolSize()
for d in destination:
d.setSymbolSize(value)
elif event == items.ItemChangedType.VISIBLE:
value = self.isVisible()
for d in destination:
d.setVisible(value)
else:
assert False
def _updated(self, event=None, checkVisibility=True):
if event == items.ItemChangedType.TEXT:
self._updateText(self.getText())
elif event == items.ItemChangedType.HIGHLIGHTED:
style = self.getCurrentStyle()
self._updatedStyle(event, style)
else:
styleEvents = [items.ItemChangedType.COLOR,
items.ItemChangedType.LINE_STYLE,
items.ItemChangedType.LINE_WIDTH,
items.ItemChangedType.SYMBOL,
items.ItemChangedType.SYMBOL_SIZE]
if self.isHighlighted():
styleEvents.append(items.ItemChangedType.HIGHLIGHTED_STYLE)
if event in styleEvents:
style = self.getCurrentStyle()
self._updatedStyle(event, style)
super(RegionOfInterest, self)._updated(event, checkVisibility)
# Displayed text has changed, send a text event
if event == items.ItemChangedType.NAME and self.__text is None:
self._updated(items.ItemChangedType.TEXT, checkVisibility)
def _updatedStyle(self, event, style):
"""Called when the current displayed style of the ROI was changed.
:param event: The event responsible of the change of the style
:param items.CurveStyle style: The current style
"""
pass
def getCurrentStyle(self):
"""Returns the current curve style.
Curve style depends on curve highlighting
:rtype: CurveStyle
"""
baseColor = rgba(self.getColor())
if isinstance(self, core.LineMixIn):
baseLinestyle = self.getLineStyle()
baseLinewidth = self.getLineWidth()
else:
baseLinestyle = self._DEFAULT_LINESTYLE
baseLinewidth = self._DEFAULT_LINEWIDTH
if isinstance(self, core.SymbolMixIn):
baseSymbol = self.getSymbol()
baseSymbolsize = self.getSymbolSize()
else:
baseSymbol = 'o'
baseSymbolsize = 1
if self.isHighlighted():
style = self.getHighlightedStyle()
color = style.getColor()
linestyle = style.getLineStyle()
linewidth = style.getLineWidth()
symbol = style.getSymbol()
symbolsize = style.getSymbolSize()
return items.CurveStyle(
color=baseColor if color is None else color,
linestyle=baseLinestyle if linestyle is None else linestyle,
linewidth=baseLinewidth if linewidth is None else linewidth,
symbol=baseSymbol if symbol is None else symbol,
symbolsize=baseSymbolsize if symbolsize is None else symbolsize)
else:
return items.CurveStyle(color=baseColor,
linestyle=baseLinestyle,
linewidth=baseLinewidth,
symbol=baseSymbol,
symbolsize=baseSymbolsize)
def _editingStarted(self):
assert self._editable is True
self.sigEditingStarted.emit()
def _editingFinished(self):
self.sigEditingFinished.emit()
def populateContextMenu(self, menu: qt.QMenu):
"""Populate a menu used as a context menu"""
pass
class HandleBasedROI(RegionOfInterest):
"""Manage a ROI based on a set of handles"""
def __init__(self, parent=None):
RegionOfInterest.__init__(self, parent=parent)
self._handles = []
self._posOrigin = None
self._posPrevious = None
def addUserHandle(self, item=None):
"""
Add a new free handle to the ROI.
This handle does nothing. It has to be managed by the ROI
implementing this class.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
return self.addHandle(item, role="user")
def addLabelHandle(self, item=None):
"""
Add a new label handle to the ROI.
This handle is neither draggable nor selectable.
It is displayed without a symbol, but it is always visible whether or not
the ROI is editable, in order to display text.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
return self.addHandle(item, role="label")
def addTranslateHandle(self, item=None):
"""
Add a new translate handle to the ROI.
        Dragging a translate handle affects the position of the ROI
        but not its shape.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
return self.addHandle(item, role="translate")
def addHandle(self, item=None, role="default"):
"""
Add a new handle to the ROI.
        Dragging a handle will affect the position or the shape of the
ROI.
:param Union[None,silx.gui.plot.items.Marker] item: The new marker to
add, else None to create a default marker.
:rtype: silx.gui.plot.items.Marker
"""
if item is None:
item = items.Marker()
color = rgba(self.getColor())
color = self._computeHandleColor(color)
item.setColor(color)
if role == "default":
item.setSymbol("s")
elif role == "user":
pass
elif role == "translate":
item.setSymbol("+")
elif role == "label":
item.setSymbol("")
if role == "user":
pass
elif role == "label":
item._setSelectable(False)
item._setDraggable(False)
item.setVisible(True)
else:
self.__updateEditable(item, self.isEditable(), remove=False)
item._setSelectable(False)
self._handles.append((item, role))
self.addItem(item)
return item
def removeHandle(self, handle):
data = [d for d in self._handles if d[0] is handle][0]
self._handles.remove(data)
role = data[1]
if role not in ["user", "label"]:
if self.isEditable():
self.__updateEditable(handle, False)
self.removeItem(handle)
def getHandles(self):
"""Returns the list of handles of this HandleBasedROI.
:rtype: List[~silx.gui.plot.items.Marker]
"""
return tuple(data[0] for data in self._handles)
def _updated(self, event=None, checkVisibility=True):
"""Implement Item mix-in update method by updating the plot items
See :class:`~silx.gui.plot.items.Item._updated`
"""
if event == items.ItemChangedType.VISIBLE:
for item, role in self._handles:
visible = self.isVisible()
editionVisible = visible and self.isEditable()
if role not in ["user", "label"]:
item.setVisible(editionVisible)
else:
item.setVisible(visible)
elif event == items.ItemChangedType.EDITABLE:
for item, role in self._handles:
editable = self.isEditable()
if role not in ["user", "label"]:
self.__updateEditable(item, editable)
super(HandleBasedROI, self)._updated(event, checkVisibility)
def _updatedStyle(self, event, style):
super(HandleBasedROI, self)._updatedStyle(event, style)
# Update color of shape items in the plot
color = rgba(self.getColor())
handleColor = self._computeHandleColor(color)
for item, role in self._handles:
if role == 'user':
pass
elif role == 'label':
item.setColor(color)
else:
item.setColor(handleColor)
def __updateEditable(self, handle, editable, remove=True):
# NOTE: visibility change emit a position update event
handle.setVisible(editable and self.isVisible())
handle._setDraggable(editable)
if editable:
handle.sigDragStarted.connect(self._handleEditingStarted)
handle.sigItemChanged.connect(self._handleEditingUpdated)
handle.sigDragFinished.connect(self._handleEditingFinished)
else:
if remove:
handle.sigDragStarted.disconnect(self._handleEditingStarted)
handle.sigItemChanged.disconnect(self._handleEditingUpdated)
handle.sigDragFinished.disconnect(self._handleEditingFinished)
def _handleEditingStarted(self):
super(HandleBasedROI, self)._editingStarted()
handle = self.sender()
self._posOrigin = numpy.array(handle.getPosition())
self._posPrevious = numpy.array(self._posOrigin)
self.handleDragStarted(handle, self._posOrigin)
def _handleEditingUpdated(self):
if self._posOrigin is None:
            # Avoid handling events triggered by visibility changes
return
handle = self.sender()
current = numpy.array(handle.getPosition())
self.handleDragUpdated(handle, self._posOrigin, self._posPrevious, current)
self._posPrevious = current
def _handleEditingFinished(self):
handle = self.sender()
current = numpy.array(handle.getPosition())
self.handleDragFinished(handle, self._posOrigin, current)
self._posPrevious = None
self._posOrigin = None
super(HandleBasedROI, self)._editingFinished()
def isHandleBeingDragged(self):
"""Returns True if one of the handles is currently being dragged.
:rtype: bool
"""
return self._posOrigin is not None
def handleDragStarted(self, handle, origin):
"""Called when an handler drag started"""
pass
def handleDragUpdated(self, handle, origin, previous, current):
"""Called when an handle drag position changed"""
pass
def handleDragFinished(self, handle, origin, current):
"""Called when an handle drag finished"""
pass
def _computeHandleColor(self, color):
"""Returns the anchor color from the base ROI color
        :param Union[numpy.array,Tuple,List] color:
:rtype: Union[numpy.array,Tuple,List]
"""
return color[:3] + (0.5,)
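# Illustrative sketch: a minimal way a concrete ROI could build on HandleBasedROI.
# The class below is hypothetical (not part of silx) and only clarifies the
# handle/role mechanism documented above; it is left commented out on purpose.
#
# class _DemoROI(HandleBasedROI):
#     def __init__(self, parent=None):
#         HandleBasedROI.__init__(self, parent=parent)
#         self._center = self.addTranslateHandle()  # "+" marker that drags the ROI
#
#     def handleDragUpdated(self, handle, origin, previous, current):
#         if handle is self._center:
#             dx, dy = current - previous  # incremental move since the last drag event
#             print("ROI moved by", dx, dy)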
|
silx-kit/silx
|
src/silx/gui/plot/items/_roi_base.py
|
_roi_base.py
|
py
| 27,769 |
python
|
en
|
code
| 106 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "items.ItemChangedType",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "items.core.HighlightedMixIn",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "items.core",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "items.CurveStyle",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "tools.roi.RegionOfInterestManager",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "tools.roi",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "items.core.HighlightedMixIn.__init__",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "items.core.HighlightedMixIn",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "items.core",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "colors.rgba",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "utils.weakref.WeakList",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "tools.roi.RegionOfInterestManager",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "tools.roi",
"line_number": 262,
"usage_type": "name"
},
{
"api_name": "colors.rgba",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "items.ItemChangedType",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 385,
"usage_type": "attribute"
},
{
"api_name": "weakref.ref",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "items.ItemChangedType",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "items.ItemChangedType",
"line_number": 447,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 503,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 507,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 511,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 515,
"usage_type": "attribute"
},
{
"api_name": "colors.rgba",
"line_number": 516,
"usage_type": "call"
},
{
"api_name": "items.ItemChangedType",
"line_number": 519,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 523,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 535,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 543,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 545,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 549,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 550,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 551,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 552,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 553,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 555,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 564,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 565,
"usage_type": "attribute"
},
{
"api_name": "colors.rgba",
"line_number": 582,
"usage_type": "call"
},
{
"api_name": "items.core.LineMixIn",
"line_number": 583,
"usage_type": "attribute"
},
{
"api_name": "items.core",
"line_number": 583,
"usage_type": "name"
},
{
"api_name": "items.core.SymbolMixIn",
"line_number": 589,
"usage_type": "attribute"
},
{
"api_name": "items.core",
"line_number": 589,
"usage_type": "name"
},
{
"api_name": "items.CurveStyle",
"line_number": 604,
"usage_type": "call"
},
{
"api_name": "items.CurveStyle",
"line_number": 611,
"usage_type": "call"
},
{
"api_name": "{'roi_tools': 'tools.roi'}.__init__",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "items.Marker",
"line_number": 691,
"usage_type": "call"
},
{
"api_name": "colors.rgba",
"line_number": 692,
"usage_type": "call"
},
{
"api_name": "items.ItemChangedType",
"line_number": 739,
"usage_type": "attribute"
},
{
"api_name": "items.ItemChangedType",
"line_number": 747,
"usage_type": "attribute"
},
{
"api_name": "colors.rgba",
"line_number": 758,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 785,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 786,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 794,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 800,
"usage_type": "call"
}
] |
24528285056
|
import concurrent.futures
import logging
import time
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
class FakeDatabase:
def __init__(self):
self.value = 0
def update(self, name):
logging.info("Thread %s: starting update", name)
local_copy = self.value
local_copy += 1
time.sleep(0.1)
self.value = local_copy
logging.info("Thread %s: finishing update", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
# threads = list()
# for index in range(3):
# logging.info("Main : create and start thread %d.", index)
# x = threading.Thread(target=thread_function, args=(index,))
# threads.append(x)
# x.start()
#
# for index, thread in enumerate(threads):
# logging.info("Main : before joining thread %d.", index)
# thread.join()
# logging.info("Main : thread %d done", index)
#
# with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: # replace Max_Workers with number of threads and execute it using .map
# executor.map(thread_function, range(3))
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
database = FakeDatabase()
logging.info("Testing update. Starting value is %d.", database.value)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
for index in range(2):
executor.submit(database.update, index)
logging.info("Testing update. Ending value is %d.", database.value)
|
hiddenxx/Scripts
|
Learning/LearningThreads.py
|
LearningThreads.py
|
py
| 1,819 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.info",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures.ThreadPoolExecutor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 54,
"usage_type": "call"
}
] |
70777980988
|
import random
import re
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras.preprocessing.text import Tokenizer
import torch
def transform_to_index_tensor(pairs,rus_w2i,en_w2i,device):
rus_tensor = []
en_tensor = []
for word in range(len(pairs[0])):
en_tensor.append(en_w2i[pairs[0][word]])
for word in range(len(pairs[1])):
rus_tensor.append(rus_w2i[pairs[1][word]])
return en_tensor,rus_tensor
def split_dataset(pairs, val_size=20000, test_size=20000):
    # sizes must be integers since they are used as slice indices below
    # (the original float defaults of 20.000 would raise a TypeError)
pairs = random.sample(pairs,len(pairs))
pairs_test = pairs[:test_size]
pairs_val = pairs[test_size:test_size+val_size]
pairs_train = pairs[test_size+val_size:len(pairs)]
return pairs_train,pairs_val,pairs_test
def custom_index_tokenizer(phrase,w2i,i2w):
for word in phrase:
if word in w2i:
continue
else:
            if not w2i:  # first word seen: start indexing at 0
w2i[word] = 0
i2w[0] = word
else:
new_idx = list(i2w)[-1]+1
w2i[word] = new_idx
i2w[new_idx] = word
return w2i,i2w
def filter_double_spaces(word_list):
updated_word_list = []
for w in range(len(word_list)):
if not word_list[w] == '':
updated_word_list.append(word_list[w])
return updated_word_list
def get_data(filename='fra.txt',max_words=-1,plot_res=False):
russian_word_list = []
english_word_list = []
#dictionaries for converting a word to a unique integer and the opposite
russian_word_to_idx = {'<SOS>':0,'<EOS>':1,'<PAD>':2}
english_word_to_idx = {'<SOS>':0,'<EOS>':1,'<PAD>':2}
russian_idx_to_word = {0:'<SOS>',1:'<EOS>',2:'<PAD>'}
english_idx_to_word = {0:'<SOS>',1:'<EOS>',2:'<PAD>'}
pairs = []
#read the dataset from the file
with open(filename, "r", encoding="utf-8") as f:
lines_list = f.read().split("\n")
print("The file total translated words/phrases are: "+str(len(lines_list)))
#get the phrases for each language to a different list
word_counter = 0
for i in range(len(lines_list)):
if not max_words == -1:
word_counter += 1
if word_counter > max_words:
break
try:
lines_list[i].split('\t')[1]
except:
continue
russian_word_list.append(lines_list[i].split('\t')[1])
english_word_list.append(lines_list[i].split('\t')[0])
print("The total english phrases are: " + str(len(english_word_list)))
print("The total russian phrases are: "+str(len(russian_word_list)))
russian_lengths = []
english_lengths = []
russian_words_final = []
english_words_final = []
for phrase in range(len(russian_word_list)):
#remove punc
russian_words = re.sub(r'[^\w\s]', '', russian_word_list[phrase])
english_words = re.sub(r'[^\w\s]', '', english_word_list[phrase])
#to lower case
russian_words = russian_words.lower()
english_words = english_words.lower()
russian_lengths.append(len(russian_words))
english_lengths.append(len(english_words))
#split to space
russian_words = russian_words.split(' ')
english_words = english_words.split(' ')
#filter double spaces
russian_words = filter_double_spaces(russian_words)
english_words = filter_double_spaces(english_words)
#add SOS and EOS tokens
russian_words.insert(0, "<SOS>")
russian_words.append("<EOS>")
english_words.insert(0, "<SOS>")
english_words.append("<EOS>")
pairs.append([english_words,russian_words])
russian_word_to_idx,russian_idx_to_word = custom_index_tokenizer(russian_words,russian_word_to_idx,russian_idx_to_word)
english_word_to_idx, english_idx_to_word = custom_index_tokenizer(english_words, english_word_to_idx,english_idx_to_word)
russian_words_final.append(russian_words)
english_words_final.append(english_words)
if plot_res:
plt.hist(english_lengths, 15, alpha=0.5, label='English Lengths',edgecolor = "black")
plt.hist(russian_lengths, 30, alpha=0.5, label='Russian Lengths',edgecolor = "black")
plt.legend(loc='upper right')
plt.show()
print("Found "+str(len(list(russian_idx_to_word.keys())))+" unique russian words.")
print("Found " + str(len(list(english_idx_to_word.keys()))) + " unique english words.")
return russian_words_final,english_words_final,russian_word_to_idx,russian_idx_to_word,english_word_to_idx,english_idx_to_word,pairs,russian_words_final,english_words_final
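# Illustrative usage sketch of the helpers above; the sample sentences are invented.
def _demo_tokenizer():
    en_w2i, en_i2w, rus_w2i, rus_i2w = {}, {}, {}, {}
    en = ['<SOS>', 'hello', '<EOS>']
    ru = ['<SOS>', 'привет', '<EOS>']
    en_w2i, en_i2w = custom_index_tokenizer(en, en_w2i, en_i2w)
    rus_w2i, rus_i2w = custom_index_tokenizer(ru, rus_w2i, rus_i2w)
    # pairs[0] is the source (English) phrase, pairs[1] the target phrase
    en_tensor, rus_tensor = transform_to_index_tensor([en, ru], rus_w2i, en_w2i, device=None)
    print(en_tensor, rus_tensor)  # [0, 1, 2] [0, 1, 2]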
|
stefanos50/Seq2Seq-Machine-Translation
|
DataPreprocessing.py
|
DataPreprocessing.py
|
py
| 4,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.sample",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
}
] |
13301085850
|
import requests
from prettytable import PrettyTable
# Install the dependencies from a terminal (Linux) or cmd (Windows):
# pip3 install prettytable requests
city = input('Enter City => ')#City name here
api = "your api here"#api of https://openweathermap.org get urs from the website
response = requests.get(f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api}")#request a response
# A truthy response (HTTP 200 OK) means the request succeeded
if response:
weather = response.json()["weather"]#get weather info
temp = response.json()["main"]#get temperature info
wind = response.json()["wind"]#get wind info
clouds = response.json()["clouds"]#get clouds info
coord = response.json()["coord"]#get coordinates
weather_cloud = weather[0]["description"]
temp_feels = temp["temp"]
temp_feels_like = temp["feels_like"]
temp_feels_min = temp["temp_min"]
temp_feels_max = temp["temp_max"]
wind_speed = wind["speed"]
clouds_deg = clouds["all"]
coordination_lon = coord["lon"]
coordination_lat= coord["lat"]
#write everything into a table
table = PrettyTable()
table.field_names = ["City",
"weather Description",
"Temperature",
"Feels Like",
"Temperature Min",
"Temperature Max",
"Win Speed",
"Cloud",
"Coordinates Long",
"Coordinates Lat"]
table.add_row([city.capitalize(),
weather_cloud,
temp_feels,
temp_feels_like,
temp_feels_min,
temp_feels_max,
wind_speed,
clouds_deg,
coordination_lon,
coordination_lat])
print(table)
else:
print("Invalid Input")
|
xavian1996/weatherpy
|
main.py
|
main.py
|
py
| 1,968 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "prettytable.PrettyTable",
"line_number": 35,
"usage_type": "call"
}
] |
23210233427
|
import pandas as pd
from morpheus import SequentialComposition, ParallelComposition
from morpheus.algo.selection import base_selection_algorithm, random_selection_algorithm
from morpheus.utils.encoding import *
from morpheus.utils import debug_print
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
VERBOSITY = 0
def default_dataset(n_features=7, random_state=997):
"""
Generate a dataset to be used in tests.
Returns:
"""
X, y = make_classification(
n_samples=10 ** 3,
n_features=n_features,
n_informative=n_features,
n_repeated=0,
n_redundant=0,
n_clusters_per_class=2,
random_state=random_state,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=random_state
)
train = pd.DataFrame(X_train)
train = train.assign(y=y_train)
test = pd.DataFrame(X_test)
test = test.assign(y=y_test)
return train, test
def default_chain(random_state=997):
"""
Default classifier chain.
For use in further tests.
Returns:
"""
train, _ = default_dataset(random_state=random_state)
m_list = default_m_list_for_chain(train.values)
sc = SequentialComposition()
for m in m_list:
sc.add_estimator(m, location="back")
return sc
def default_ensemble(random_state=997):
"""
    Default classifier ensemble.
For use in further tests.
Returns:
"""
train, _ = default_dataset(random_state=random_state)
m_list = default_m_list_for_ensemble(train.values)
pc = ParallelComposition()
for m in m_list:
pc.add_estimator(m)
return pc
def default_m_list_for_chain(data):
targ_ids_1 = [4, 5]
desc_ids_1 = [0, 1, 2]
targ_ids_2 = [7]
desc_ids_2 = [1, 2, 5]
all_desc_ids = [desc_ids_1, desc_ids_2]
all_targ_ids = [targ_ids_1, targ_ids_2]
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
print(msg)
if set(targ_ids).issubset({6, 7}):
learner = RandomForestClassifier
elif set(targ_ids).issubset({0, 1, 2, 3, 4, 5}):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(data, desc_ids, targ_ids, learner, max_depth=5, n_estimators=5)
m_list.append(m)
return m_list
def default_m_list_for_ensemble(data):
targ_ids_1 = [5]
desc_ids_1 = [0, 1, 2]
targ_ids_2 = [4, 5]
desc_ids_2 = [0, 1, 3]
all_desc_ids = [desc_ids_1, desc_ids_2]
all_targ_ids = [targ_ids_1, targ_ids_2]
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
print(msg)
if set(targ_ids).issubset({6, 7}):
learner = RandomForestClassifier
elif set(targ_ids).issubset({0, 1, 2, 3, 4, 5}):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(data, desc_ids, targ_ids, learner, max_depth=5, n_estimators=5)
m_list.append(m)
return m_list
def default_m_list_for_mercs(data):
n, m = data.shape
attributes = list(range(m))
metadata = {"nb_atts": m}
settings = {"param": 1, "its": 1}
m_codes = base_selection_algorithm(metadata, settings)
all_desc_ids, all_targ_ids = [], []
for m_code in m_codes:
desc_ids, targ_ids, _ = code_to_query(m_code)
all_desc_ids.append(desc_ids)
all_targ_ids.append(targ_ids)
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
print(msg)
if set(targ_ids).issubset(attributes[-1:]):
learner = RandomForestClassifier
elif set(targ_ids).issubset(attributes[:-1]):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(data, desc_ids, targ_ids, learner, max_depth=5, n_estimators=5)
m_list.append(m)
return m_list
def random_m_list_for_mercs(data, its=1, fraction=0.3, random_state=997):
n, m = data.shape
attributes = list(range(m))
metadata = {"nb_atts": m}
settings = {"param": 1, "its": its, "fraction": fraction}
m_codes = random_selection_algorithm(metadata, settings, random_state=random_state)
all_desc_ids, all_targ_ids = [], []
for m_code in m_codes:
desc_ids, targ_ids, _ = code_to_query(m_code)
all_desc_ids.append(desc_ids)
all_targ_ids.append(targ_ids)
m_list = []
ids = zip(all_desc_ids, all_targ_ids)
for desc_ids, targ_ids in ids:
msg = """
Learning model with desc ids: {}
targ ids: {}
""".format(
desc_ids, targ_ids
)
debug_print(msg, level=1, V=VERBOSITY)
if set(targ_ids).issubset(attributes[-1:]):
learner = RandomForestClassifier
elif set(targ_ids).issubset(attributes[:-1]):
learner = RandomForestRegressor
else:
msg = """
Cannot learn mixed (nominal/numeric) models
"""
raise ValueError(msg)
# Learn a model for desc_ids-targ_ids
m = learn_model(
data,
desc_ids,
targ_ids,
learner,
max_depth=5,
n_estimators=5,
random_state=random_state,
)
m_list.append(m)
return m_list
def learn_model(data, desc_ids, targ_ids, model, **kwargs):
"""
Learn a model from the data.
    The desc_ids and targ_ids identify which learning task
    should be extracted from the data.
Model is a machine learning method that has a .fit() method.
Args:
data:
desc_ids:
targ_ids:
model:
**kwargs:
Returns:
"""
X, Y = data[:, desc_ids], data[:, targ_ids]
if X.shape[1] == 1:
X = X.ravel()
if Y.shape[1] == 1:
Y = Y.ravel()
try:
clf = model(**kwargs)
clf.fit(X, Y)
except ValueError as e:
print(e)
# Bookkeeping
clf.desc_ids = desc_ids
clf.targ_ids = targ_ids
return clf
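# Illustrative usage sketch of learn_model on the synthetic data defined above:
# column 7 is the appended nominal target "y", so a classifier is the fitting learner.
def _demo_learn_model():
    train, test = default_dataset()
    clf = learn_model(train.values, desc_ids=[0, 1, 2], targ_ids=[7],
                      model=RandomForestClassifier, max_depth=5, n_estimators=5)
    print(clf.desc_ids, clf.targ_ids)  # bookkeeping attached by learn_model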
|
eliavw/morpheus
|
src/morpheus/tests/basics.py
|
basics.py
|
py
| 7,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "morpheus.SequentialComposition",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "morpheus.ParallelComposition",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "morpheus.algo.selection.base_selection_algorithm",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "morpheus.algo.selection.random_selection_algorithm",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "morpheus.utils.debug_print",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 238,
"usage_type": "name"
}
] |
4534058436
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import SPARQLWrapper
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def select_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://vocabs.ardc.edu.au/repository/api/sparql/csiro_international-chronostratigraphic-chart_geologic-time-scale-2020")
sparql.setReturnFormat(SPARQLWrapper.JSON)
# Gets the first 3 geological ages from a Geological Timescale database, via a SPARQL endpoint.
sparql.setQuery("""
PREFIX gts: <http://resource.geosciml.org/ontology/timescale/gts#>
SELECT *
WHERE {
?a a gts:Age .
}
ORDER BY ?a
LIMIT 3
"""
)
try:
ret = sparql.queryAndConvert()
for r in ret["results"]["bindings"]:
print(r)
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def ask_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
ASK WHERE {
<http://dbpedia.org/resource/Asturias> rdfs:label "Asturias"@es
}
"""
)
sparql.setReturnFormat(SPARQLWrapper.XML)
try:
results = sparql.query().convert()
print(results.toxml())
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def construct_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX sdo: <https://schema.org/>
CONSTRUCT {
?lang a sdo:Language ;
sdo:alternateName ?iso6391Code .
}
WHERE {
?lang a dbo:Language ;
dbo:iso6391Code ?iso6391Code .
FILTER (STRLEN(?iso6391Code)=2) # To filter out non-valid values.
}
LIMIT 3
"""
)
try:
results = sparql.queryAndConvert()
print(results.serialize())
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def describe_example():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("DESCRIBE <http://dbpedia.org/resource/Asturias>")
try:
results = sparql.queryAndConvert()
print(results.serialize(format="json-ld"))
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def update_example():
sparql = SPARQLWrapper.SPARQLWrapper("https://example.org/sparql")
sparql.setHTTPAuth(SPARQLWrapper.DIGEST)
sparql.setCredentials("some-login", "some-password")
sparql.setMethod(SPARQLWrapper.POST)
sparql.setQuery("""
PREFIX dbp: <http://dbpedia.org/resource/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
WITH <http://example.graph>
DELETE {
            dbp:Asturias rdfs:label "Asturies"@ast
}
"""
)
try:
results = sparql.query()
print(results.response.read())
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def SPARQLWrapper2_example():
sparql = SPARQLWrapper.SPARQLWrapper2("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX dbp: <http://dbpedia.org/resource/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE {
dbp:Asturias rdfs:label ?label
}
LIMIT 3
"""
)
try:
for result in sparql.query().bindings:
print(f"{result['label'].lang}, {result['label'].value}")
except Exception as ex:
print(ex)
# REF [site] >> https://sparqlwrapper.readthedocs.io/en/latest/main.html
def partial_interpretation_of_results():
sparql = SPARQLWrapper.SPARQLWrapper2("http://example.org/sparql")
sparql.setQuery("""
SELECT ?subj ?prop
WHERE {
?subj ?prop ?obj
}
"""
)
try:
ret = sparql.query()
print(ret.variables) # This is an array consisting of "subj" and "prop".
for binding in ret.bindings:
# Each binding is a dictionary. Let us just print the results.
print(f"{binding['subj'].value}, {binding['subj'].type}")
print(f"{binding['prop'].value}, {binding['prop'].type}")
except Exception as ex:
print(ex)
#-----
sparql.setQuery("""
SELECT ?subj ?obj ?opt
WHERE {
?subj <http://a.b.c> ?obj .
OPTIONAL {
?subj <http://d.e.f> ?opt
}
}
"""
)
try:
ret = sparql.query()
print(ret.variables) # This is an array consisting of "subj", "obj", "opt".
if ("subj", "prop", "opt") in ret:
# There is at least one binding covering the optional "opt", too.
bindings = ret["subj", "obj", "opt"]
# Bindings is an array of dictionaries with the full bindings.
for b in bindings:
subj = b["subj"].value
o = b["obj"].value
opt = b["opt"].value
# Do something nice with subj, o, and opt.
# Another way of accessing to values for a single variable: take all the bindings of the "subj", "obj", "opt".
subjbind = ret.getValues("subj") # An array of Value instances.
objbind = ret.getValues("obj") # An array of Value instances.
optbind = ret.getValues("opt") # An array of Value instances.
except Exception as ex:
print(ex)
def dbpedia_test():
sparql = SPARQLWrapper.SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setReturnFormat(SPARQLWrapper.JSON)
if True:
sparql.setQuery("""
SELECT ?uri ?name ?page ?nick
WHERE {
?uri a foaf:Person ;
foaf:name ?name;
foaf:page ?page;
foaf:nick ?nick.
}
LIMIT 100
"""
)
elif False:
sparql.setQuery("""
SELECT ?name ?birth ?role
WHERE{
?x a foaf:Person ;
dbpprop:fullname ?name;
dbpprop:countryofbirth ?birth;
dbpprop:role ?role.
FILTER regex(?birth, "land$").
FILTER regex(?birth, "^Eng").
FILTER regex(?birth, "England").
} LIMIT 100
"""
)
try:
ret = sparql.queryAndConvert()
print(ret["results"]["bindings"])
except Exception as ex:
print(ex)
def dbpedia_ko_test():
sparql = SPARQLWrapper.SPARQLWrapper("http://ko.dbpedia.org/sparql")
sparql.setReturnFormat(SPARQLWrapper.JSON)
if False:
sparql.setQuery("""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
SELECT DISTINCT ?comment
WHERE {
?s foaf:name ?name;
rdfs:comment ?comment;
dbp:occupation ?occupation.
FILTER(REGEX(STR(?occupation), '정치'))
}
LIMIT 30
"""
)
elif False:
sparql.setQuery("""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
SELECT ?comment, ?relative, ?parent
WHERE {
?s foaf:name ?name;
rdfs:comment ?comment.
FILTER(STR(?name) = '하정우')
OPTIONAL{?relative dbo:relative ?s.}
OPTIONAL{?parent dbo:child ?s.}
}
LIMIT 30
"""
)
elif True:
sparql.setQuery("""
select * where {
?s <http://ko.dbpedia.org/property/장소> ?o
} LIMIT 100
"""
)
elif False:
sparql.setQuery("""
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
PREFIX res: <http://ko.dbpedia.org/resource/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select * where {
?s rdf:type foaf:Person.
?s <http://ko.dbpedia.org/property/국가> '대한민국'@ko.
}
"""
)
elif False:
sparql.setQuery("""
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX dbp: <http://ko.dbpedia.org/property/>
PREFIX res: <http://ko.dbpedia.org/resource/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select count(*) where {
?s rdf:type foaf:Person.
{?s dbp:출생일 ?Bdate.} UNION {?s dbp:사망일 ?Ddate.}
?s dbo:abstract ?abstract.
?s dbp:국적 ?nation.
}
"""
)
try:
ret = sparql.queryAndConvert()
print(ret["results"]["bindings"])
except Exception as ex:
print(ex)
def main():
#select_example()
#ask_example()
#construct_example()
#describe_example()
#update_example()
#SPARQLWrapper2_example()
#partial_interpretation_of_results()
#-----
dbpedia_test()
dbpedia_ko_test()
#--------------------------------------------------------------------
if "__main__" == __name__:
main()
|
sangwook236/SWDT
|
sw_dev/python/ext/test/database/sparqlwrapper_test.py
|
sparqlwrapper_test.py
|
py
| 7,970 |
python
|
en
|
code
| 17 |
github-code
|
6
|
[
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.JSON",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.XML",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.DIGEST",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "SPARQLWrapper.POST",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper2",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper2",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.JSON",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.JSON",
"line_number": 226,
"usage_type": "attribute"
}
] |
16292938825
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 11 14:27:38 2021
@author: maximelucas
"""
import numpy as np
import matplotlib.pyplot as plt
rho_max=250
v_max=130
long=1000
A=150
B=40000
ga=[0 for i in range(B)]
gb=[0 for i in range(B)]
rho0=[200 for i in range(A//5)]+[0 for i in range(4*A//5)]
deltaX = long/A
deltaT=deltaX * (1/(v_max+1))
rho_critique = 125
def gG(s1, s2):
    # Godunov numerical flux; rho_critique (= 125) is the density where f is maximal
    if s1 <= rho_critique and s2 <= rho_critique:
        res = f(s1)
    elif s1 >= rho_critique and s2 >= rho_critique:
        res = f(s2)
    elif s1 < rho_critique and s2 > rho_critique:
        res = min(f(s1), f(s2))
    else:
        res = f(rho_critique)
    return res
#return f(s1)
def f(rho):
res = rho*v(rho)
return res
#def v(rho):
#res = v_max
#res = (v_max/rho_max)*(rho_max-rho)
# return res
def v(rho):
res = v_max
if rho != 0:
res = np.sqrt(100*((1000/rho)-4))
if res > v_max :
res = v_max
return res
def rho_c(X):
l = (deltaT)/(deltaX)
Y = [0 for i in range(len(X))]
for i in range(1, len(X)-1):
Y[i] = X[i] -( l * (gG(X[i], X[i+1]) - gG(X[i-1], X[i])))
return Y
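# Illustrative note on the scheme above: rho_c performs one explicit Godunov step
# for the scalar conservation law rho_t + f(rho)_x = 0,
#     rho_i^{n+1} = rho_i^n - (deltaT/deltaX) * (gG(rho_i, rho_{i+1}) - gG(rho_{i-1}, rho_i)),
# where the numerical flux gG is built from f(rho) = rho * v(rho) and
# rho_critique = 125 is the density at which f is assumed to be maximal.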
v = [v(i) for i in range(250)]  # note: rebinds v from the speed function to a list of speeds
plt.plot(v)
#line, = plt.plot(rho0)
#X=rho0
#for i in range(1000):
# X = rho_c(X)
# line.set_ydata(X)
# plt.pause(1e-4)
# plt.draw()
|
Maksime0/Mod-lisation-et-mesures-pour-le-trafic-routier
|
Documents annexes/Godonov/godonov_anime.py
|
godonov_anime.py
|
py
| 1,309 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.sqrt",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 62,
"usage_type": "name"
}
] |
74787884667
|
# This file is part of "Junya's self learning project about Neural Network."
#
# "Junya's self learning project about Neural Network"
# is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "Junya's self learning project about Neural Network"
# is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# (c) Junya Kaneko <[email protected]>
import numpy as np
from matplotlib import pyplot
import logging
__author__ = 'Junya Kaneko <[email protected]>'
def sigmoid(z, alpha=1, theta=0):
return 1 / (1 + np.exp(-alpha * (z - theta)))
def draw_sigmoid_graphs():
dom = np.array([x / 100 for x in range(-300, 300)])
alphas = [1, 10, 50, 100]
nrows = 2
ncols = int(len(alphas)/2)
for i, alpha in enumerate(alphas):
values = np.array([0.0] * len(dom))
for j, x in enumerate(dom):
values[j] = sigmoid(x, alpha=alpha)
pyplot.subplot(nrows, ncols, i + 1)
pyplot.title('Alpha = %s' % alpha)
pyplot.xlabel('x')
pyplot.ylabel('sigmoid(x)')
pyplot.plot(dom, values)
pyplot.tight_layout()
pyplot.show()
class Element:
def __init__(self, w, alpha, theta):
self._w = w if isinstance(w, np.ndarray) else np.array(w)
self._alpha = alpha
self._theta = theta
def out(self, input, alpha=None, theta=None):
_input = input if isinstance(input, np.ndarray) else np.array(input)
_alpha = alpha if alpha else self._alpha
_theta = theta if theta else self._theta
logging.debug('PARAMETERS (alpha, theta): (%s, %s)' % (_alpha, _theta))
logging.debug('INPUT: (%s), SIGMOID: %s' % (','.join([str(i) for i in _input]), sigmoid(self._w.dot(_input), _alpha, _theta)))
if sigmoid(self._w.dot(_input), _alpha, _theta) > 0.5:
logging.debug('OUTPUT: %s' % 1)
return 1
else:
logging.debug('OUTPUT: %s' % 0)
return 0
def __call__(self, input, alpha=None, theta=None):
return self.out(input, alpha, theta)
class AndElement(Element):
def __init__(self):
super().__init__(w=[1, 1], alpha=1, theta=1.5)
class OrElement(Element):
def __init__(self):
super().__init__(w=[1, 1], alpha=1, theta=0.5)
class XorElement:
def out(self, input):
or_element = OrElement()
i1 = np.array([input[0], -input[1]])
i2 = np.array([-input[0], input[1]])
o1 = or_element(i1)
o2 = or_element(i2)
return or_element([o1, o2])
def __call__(self, input):
return self.out(input)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
draw_sigmoid_graphs()
and_element = AndElement()
or_element = OrElement()
xor_element = XorElement()
assert and_element([1, 1])
assert not and_element([1, 0])
assert not and_element([0, 1])
assert not and_element([0, 0])
assert or_element([1, 1])
assert or_element([1, 0])
assert or_element([0, 1])
assert not or_element([0, 0])
assert not xor_element([1, 1])
assert xor_element([1, 0])
assert xor_element([0, 1])
assert not xor_element([0, 0])
|
junyakaneko/learning-and-neural-network
|
chapter3/problem1.py
|
problem1.py
|
py
| 3,651 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.exp",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 101,
"usage_type": "attribute"
}
] |
8927199604
|
import contextlib
import os
import shutil
import tempfile
import mandrel
import unittest
class TestCase(unittest.TestCase):
def assertIs(self, a, b):
# python 2.6/2.7 compatibility
self.assertTrue(a is b)
@contextlib.contextmanager
def tempdir(dir=None):
"""Context manager that yields a temporary directory. Cleans up afterwards."""
if dir is not None:
dir = os.path.realpath(os.path.expanduser(dir))
path = os.path.realpath(tempfile.mkdtemp(dir=dir))
try:
yield path
finally:
shutil.rmtree(path, ignore_errors=True)
@contextlib.contextmanager
def chdir(path):
"""Context manager that moves to path in context; returns to original dir afterwards."""
start_path = os.path.realpath('.')
try:
os.chdir(os.path.realpath(os.path.expanduser(path)))
yield
finally:
os.chdir(start_path)
@contextlib.contextmanager
def workdir(dir=None):
"""Context manager that creates a temp dir, moves to it, and yields the path.
Moves back to original dir and cleans up afterwards."""
with tempdir(dir) as path:
with chdir(path):
yield path
def refresh_bootstrapper():
if hasattr(mandrel, 'bootstrap'):
reload(mandrel.bootstrap)
else:
__import__('mandrel.bootstrap')
BOOTSTRAP_FILE = 'Mandrel.py'
@contextlib.contextmanager
def bootstrap_scenario(text="", dir=None):
with workdir(dir=dir) as path:
bootstrapper = os.path.join(path, BOOTSTRAP_FILE)
with open(bootstrapper, 'w') as f:
f.write(text)
yield path, bootstrapper
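# Illustrative usage sketch of the helpers above: create a throwaway bootstrap
# scenario, check the bootstrap file inside it, and rely on cleanup at exit.
# The bootstrap text is an arbitrary placeholder.
def _demo_bootstrap_scenario():
    with bootstrap_scenario(text="# placeholder bootstrap content") as (path, bootstrapper):
        assert bootstrapper == os.path.join(path, BOOTSTRAP_FILE)
        assert os.path.exists(bootstrapper)
    assert not os.path.exists(path)  # temporary directory removed on exit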
|
ethanrowe/python-mandrel
|
mandrel/test/utils.py
|
utils.py
|
py
| 1,612 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "mandrel.bootstrap",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 52,
"usage_type": "attribute"
}
] |
73676828986
|
import serial,os,sys
ser = serial.Serial('/dev/ttyAMA1', 115200)
drawPath = "/home/ubuntu/my_ws/src/test_odom_rap/drawLcd/drawLcd.py"
byte_list = [0x55, 0x0E, 0x01, 0x02,
int(0 / 256), int(0 % 256),
0,0,
0,0,
0, 0, 1]
k = 0
for i in range(len(byte_list)):
k += byte_list[i]
k = k % 256
byte_list.append(k)
contr_law = b"%c%c%c%c%c%c%c%c%c%c%c%c%c%c" % (byte_list[0], byte_list[1], byte_list[2], byte_list[3],
byte_list[4], byte_list[5], byte_list[6], byte_list[7],
byte_list[8], byte_list[9], byte_list[10], byte_list[11],
byte_list[12], byte_list[13])
ser.write(contr_law)
try:
if ord(ser.read())==85:
flag = False
receive = [ord(ser.read()) for i in range(13)]
        battery = int(27.322*((receive[9] * 256 + receive[10]) /100.0) - 245.9)
        print(battery)
        if battery <0:
            battery = 0
        elif battery>100:
            battery = 100
        try:
            print(battery)
            os.popen("python "+" "+drawPath+" "+str(battery)+" 0 0")
        except IOError:
            res = os.system("sudo chmod 777 /dev/i2c-1&sudo chown root.gpio /dev/gpiomem&sudo chmod g+rw /dev/gpiomem&sudo chmod 777 /dev/spidev0.*")
            os.popen("python "+" "+drawPath+" "+str(battery)+" 0 0")
except TypeError:
print('error')
|
caiyilian/INTELLIGENT-FOOD-DELIVERY-ROBOT
|
树莓派代码/drawLcd/auto_startup.py.py
|
auto_startup.py.py
|
py
| 1,524 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 36,
"usage_type": "call"
}
] |
18023127994
|
import os
import sqlite3
import shutil
import sys
import psutil
def is_browser_running(browser_name):
for process in psutil.process_iter():
try:
if browser_name in process.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False
if is_browser_running('firefox'):
print("Firefox가 실행 중입니다. 브라우저를 닫고 다시 시도하세요.")
# Set the paths according to the environment
if os.name == 'nt': # Windows environment
base_path = "C:/Users"
else: # WSL environment
base_path = "/mnt/c/Users"
# Windows user name
windows_username = "user" # enter your actual Windows user name here
# Firefox profile path
firefox_path = os.path.join(base_path, windows_username, "AppData", "Roaming", "Mozilla", "Firefox")
# Chrome profile path
chrome_path = os.path.join(base_path, windows_username, "AppData", "Local", "Google", "Chrome", "User Data", "Default", "History")
# Function to extract data from a SQLite database
def extract_sqlite_data(db_path, query):
try:
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
cursor.execute(query)
rows = cursor.fetchall()
conn.close()
return rows
except Exception as e:
print(f"Error reading {db_path}: {str(e)}")
return []
if not os.access(firefox_path, os.R_OK):
print("Firefox 경로에 접근할 수 없습니다. 권한을 확인하세요.")
# Path of the file where the output will be saved
output_file_path = "linuxweboutput.txt"
# Back up the original output stream and redirect it to the file
original_stdout = sys.stdout
with open(output_file_path, "w") as f:
sys.stdout = f
    # Extract the Firefox browsing history.
for profile_directory in os.listdir(firefox_path):
if profile_directory.endswith('.default-release'):
places_db = os.path.join(firefox_path, profile_directory, 'places.sqlite')
if os.path.exists(places_db):
temp_copy = os.path.join("/tmp", "temp_places.sqlite")
shutil.copy(places_db, temp_copy)
query = "SELECT url, title, last_visit_date FROM moz_places"
records = extract_sqlite_data(temp_copy, query)
os.remove(temp_copy)
for record in records:
print(record)
    # Extract the Chrome browsing history.
if os.path.exists(chrome_path):
temp_copy = os.path.join("/tmp", "temp_history.sqlite")
shutil.copy(chrome_path, temp_copy)
query = "SELECT url, title, last_visit_time FROM urls"
records = extract_sqlite_data(temp_copy, query)
os.remove(temp_copy)
for record in records:
print(record)
# Restore the original output stream
sys.stdout = original_stdout
|
KIMJOONSIG/Reboot3
|
Linux/p3_BrowserHistory.py
|
p3_BrowserHistory.py
|
py
| 2,892 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "psutil.process_iter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "psutil.NoSuchProcess",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "psutil.AccessDenied",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "psutil.ZombieProcess",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.name",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.access",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.R_OK",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "shutil.copy",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 82,
"usage_type": "attribute"
}
] |
29716875304
|
from basePlayer import BasePlayer
import itertools
from math import inf
from random import choice
import numpy as np
from generals import State
import pickle
from minimaxPlayer import MinMaxPlayer
from generals import PlayerEnum
from game import Board
from game import Controller
from generals import g
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
class SVMPlayer(BasePlayer):
def __init__(self, sign, board):
BasePlayer.__init__(self, board)
self.states_value = {} # state -> value
# get unique hash of current board state
def getHash(self, boardState):
boardHash = ""
for k in range(len(boardState)):
for l in range(len(boardState)):
boardHash += str(boardState[k][l].state.value)
return boardHash
def training(self, X, Y, rounds=10000):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
"""scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
print(X_scaled) """
""" for i in range(len(X)):
print(str(X[i]) + " " + str(Y[i])) """
""" reg = svm.SVR(C=1.0, cache_size=1000, coef0=0.0, degree=3, epsilon=0.1,
gamma='auto_deprecated', kernel='rbf', max_iter=-1, shrinking=True,
tol=0.001, verbose=False) """
#SVRmodel.fit(X_scaled, Y)
#reg = RandomForestRegressor(max_depth=3, random_state=0, n_estimators=200)
reg = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial')
#reg = LinearRegression()
#reg = linear_model.BayesianRidge()
#reg = linear_model.Lasso(alpha=0.1)
#reg = linear_model.Ridge(alpha=1.0)
#reg = RandomForestClassifier(n_estimators=150, max_depth=4, random_state=0)
#reg = MultinomialNB()
#reg = KNeighborsClassifier(n_neighbors=3)
reg.fit(X, Y)
#print(reg.feature_importances_)
        result = reg.score(X_train, Y_train)  # note: reg was fit on all of X, so this training-set score overstates accuracy
print("Accuracy: %.2f%%" % (result*100.0))
print()
#print(X[3])
""" print(X_scaled[1]) """
#print(Y[3])
""" for i in range(len(X_test)):
print(X_test[i])
print(reg.predict([X_test[i]]))
print(Y_test[i]) """
return reg
def generateStates(self, boardSize, gameNumber):
board = Board(boardSize)
cont = Controller(PlayerEnum.Random, PlayerEnum.Random, board)
boardStates = []
winStates = []
winner = -1
for _ in itertools.repeat(None, gameNumber):
end = False
number_of_appends = 0
while board.freeCellCheck():
win = False
move = cont.player1.move()
boardState = self.transfromBoardState4(self.getHash(board.table))
if boardState not in boardStates:
boardStates.append(boardState)
number_of_appends = number_of_appends + 1
if cont.checkWin(cont.player1.sign, move):
win = True
end = True
winner = 1
board.reset()
break
if not board.freeCellCheck():
break
move = cont.player2.move()
boardState = self.transfromBoardState4(self.getHash(board.table))
if boardState not in boardStates:
boardStates.append(boardState)
number_of_appends = number_of_appends + 1
if cont.checkWin(cont.player2.sign, move):
win = True
end = True
winner = 2
board.reset()
break
if not win:
board.reset()
end = True
winner = 0
if end:
for _ in itertools.repeat(None, number_of_appends):
winStates.append(winner)
return [boardStates, winStates]
def transfromBoardState4(self, boardStateHash):
result = []
for i in range(len(boardStateHash)):
if boardStateHash[i] == '1':
result.append(1)
result.append(0)
result.append(0)
elif boardStateHash[i] == '2':
result.append(0)
result.append(1)
result.append(0)
else:
result.append(0)
result.append(0)
result.append(1)
return result
def transfromBoardState5(self, boardStateHash):
result = []
for i in range(len(boardStateHash)):
if boardStateHash[i] == '1':
result.append(1)
result.append(0)
elif boardStateHash[i] == '2':
result.append(0)
result.append(1)
else:
result.append(0)
result.append(0)
return result
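# Illustrative usage sketch (assumed wiring based on the method signatures above;
# Board comes from the project's game module):
#
#     player = SVMPlayer(sign=1, board=Board(3))
#     X, Y = player.generateStates(boardSize=3, gameNumber=1000)
#     model = player.training(X, Y)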
|
Gbor97/TicTacToe
|
SVMPlayer.py
|
SVMPlayer.py
|
py
| 5,736 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "basePlayer.BasePlayer",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "basePlayer.BasePlayer.__init__",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "basePlayer.BasePlayer",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "game.Board",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "game.Controller",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "generals.PlayerEnum.Random",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "generals.PlayerEnum",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "itertools.repeat",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 130,
"usage_type": "call"
}
] |
36180496136
|
"""
Zimri Leisher and Luca Araujo
Codeforces database, API and web app
"""
import sys
import traceback
import psycopg2
import json
import config
import flask
from collections import defaultdict
api = flask.Blueprint('api', __name__)
def get_connection():
return psycopg2.connect(database=config.database,
user=config.user,
password=config.password)
@api.route('/help')
def get_help():
return flask.send_file('.' + flask.url_for('static', filename='api-design.txt'), mimetype='text')
@api.route('/users/<institution_type>')
def get_users(institution_type):
"""REQUEST: /users/<institution_type>
institution_type (Required) -- defines whether we should search users
by their university or country
GET parameters
    institution_name (Optional, default: '') -- gives the name of the university/country
        to search for
lowest_rating (Optional, default: -infinity) -- return only users with
rating bigger than or equal to the one given
highest_rating (Optional, default: infinity) -- return only users with
rating less than or equal to the one given
    max_users (Optional, default: 50) -- the maximum number of users to return
if value given is higher than 500 it is changed to 500
RESPONSE: a JSON list of dictionaries, each of which represents one
user, sorted decreasingly by rating. Each dictionary in this list
will have the following fields.
handle -- (TEXT) the user's handle
name -- (TEXT) the user's name
rating -- (INTEGER) the user's current rating
max_rating -- (INTEGER) the user's maximum rating
rank -- (TEXT) the user's current rank
max_rank -- (TEXT) the user's maximum rank"""
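    # Illustrative request (example values, derived from the docstring above):
    #   GET /users/country?institution_name=Brazil&lowest_rating=1800&max_users=5
    # returns at most 5 users from Brazil rated 1800 or higher, sorted by rating.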
lowest_rating = flask.request.args.get("lowest_rating")
highest_rating = flask.request.args.get("highest_rating")
max_users = flask.request.args.get("max_users")
institution_name = flask.request.args.get("institution_name")
query = """SELECT handle, first_name, last_name, rating, max_rating, user_rank, max_user_rank FROM users"""
predicates = []
args = {}
if lowest_rating:
predicates.append("""users.rating >= %(lowest_rating)s""")
args["lowest_rating"] = int(lowest_rating)
if highest_rating:
predicates.append("""users.rating <= %(highest_rating)s""")
args["highest_rating"] = int(highest_rating)
if institution_type and institution_name:
if institution_type == 'country':
predicates.append("""users.country ILIKE CONCAT('%%', %(institution_name)s, '%%')""")
else:
predicates.append("""users.organization ILIKE CONCAT('%%', %(institution_name)s, '%%')""")
args["institution_name"] = institution_name
if len(predicates) > 0:
query += " WHERE " + " AND ".join(predicates)
query += " ORDER BY (-users.rating, users.handle) LIMIT %(max_users)s"
args["max_users"] = int(max_users) if max_users else 50
users = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
for row in list(cursor):
users.append({"handle": row[0], "name": (row[1] if row[1] else "") + " " + (row[2] if row[2] else ""),
"rating": row[3], "max_rating": row[4], "user_rank": row[5], "max_user_rank": row[6]})
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(users)
@api.route('/problems')
def get_problems():
"""REQUEST: /problems
GET parameters
tag (Optional, default: '') -- returns only problems that contain
the defined tag. If left blank returns problems of any tag
lowest_rating (Optional, default: -infinity) -- return only problems with
rating bigger than or equal to the one given
highest_rating (Optional, default: infinity) -- return only problems with
rating less than or equal to the one given
    max_problems (Optional, default: 50) -- the maximum number of problems to return
if value given is higher than 500 it is changed to 500
RESPONSE: a JSON list of dictionaries, each of which represents one
problem, sorted decreasingly by number of users who solved the problem.
Each dictionary in this list will have the following fields.
id -- (INTEGER) the codeforces id of the problem
name -- (TEXT) the problem's name
rating -- (INTEGER) the problem's rating
tags -- (TEXT) the list of tags of a problem separated by commas
solved_count -- (INTEGER) the number of users that solved that problem"""
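    # Illustrative request (example values, derived from the docstring above):
    #   GET /problems?tag=dp&lowest_rating=1500&highest_rating=2000&max_problems=20
    # returns at most 20 problems tagged "dp" rated 1500-2000, sorted by solved_count.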
# a user can ask for a query without any tag
tag = flask.request.args.get("tag")
lowest_rating = flask.request.args.get("lowest_rating")
highest_rating = flask.request.args.get("highest_rating")
max_problems = flask.request.args.get("max_problems")
query = """SELECT problems.problem_id, problems.name, rating, solved_count FROM problems"""
predicates = []
args = {}
if tag:
query += ", problem_tags, tags" # we only search through the tags if we need
predicates.append("""tags.name = %(tag)s
AND tags.id = problem_tags.tag_id
AND problem_tags.problem_id = problems.problem_id""")
args["tag"] = tag
if lowest_rating:
predicates.append("""problems.rating >= %(lowest_rating)s""")
args["lowest_rating"] = int(lowest_rating)
if highest_rating:
predicates.append("""problems.rating <= %(highest_rating)s""")
args["highest_rating"] = int(highest_rating)
if len(predicates) > 0:
query += " WHERE " + " AND ".join(predicates)
query += " ORDER BY problems.solved_count DESC LIMIT %(max_problems)s"
args["max_problems"] = int(max_problems) if max_problems else 50
problems = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
for row in list(cursor):
problems.append({"id": row[0], "name": row[1], "rating": row[2], "solved_count": row[3]})
for problem in problems:
local_query= """SELECT tags.name FROM tags, problem_tags
WHERE problem_tags.problem_id = %(id)s
AND problem_tags.tag_id = tags.id"""
local_args = {"id" : problem['id']}
cursor.execute(local_query, local_args)
problem["tags"] = []
for tag in list(cursor):
problem["tags"].append(tag[0])
problem["tags"] = ", ".join(problem["tags"])
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(problems)
@api.route('/contests/<data_requested>')
def get_contest_graph(data_requested):
"""REQUEST: /contests/<data_requested>
data_requested (Required) -- defines whether to display the graph by difficulty
of the contest (calculated by the average difficulty of each contest)
or by the number of users that solved any problem of that contest
GET parameters
lowest_id (Optional, default: 0) -- return only the contests with id
bigger than or equal to the given value
highest_id (Optional, default: infinity) -- return only the contests with id
less than or equal to the given value
RESPONSE: a JSON list of tuples of two elements, each of which represents one
    contest, sorted increasingly by id. Each tuple contains a pair of (id, difficulty)
    if the requested information was difficulty or a pair of (id, solved_count) if the
    requested information was solved count"""
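    # Illustrative request (example values, derived from the docstring above):
    #   GET /contests/difficulty?lowest_id=1500&highest_id=1600
    # returns a list of (id, difficulty) pairs for contests with ids in that range.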
# values of the data requested must be either total_solves or difficulty
lowest_id = flask.request.args.get("lowest_id")
highest_id = flask.request.args.get("highest_id")
print("received args:", flask.request.args)
predicates = []
args = {}
query = """SELECT contests.id, contests.%(data_requested)s FROM contests"""
args["data_requested"] = psycopg2.extensions.AsIs(data_requested)
if lowest_id:
predicates.append("""contests.id >= %(lowest_id)s""")
args["lowest_id"] = int(lowest_id)
if highest_id:
predicates.append("""contests.id <= %(highest_id)s""")
args["highest_id"] = int(highest_id)
if len(predicates) > 0:
query += " WHERE " + " AND ".join(predicates)
query += " ORDER BY contests.id"
contests = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
for row in list(cursor):
contests.append((row[0], row[1]))
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(contests)
@api.route('/tags_graph/<received_tags>')
def get_tags_graph(received_tags):
"""REQUEST: /tags_graph/<tags>
tags (Required) -- returns a plot graph for each of the required
tags. The input is a list of tags separated by commas
    RESPONSE: a JSON dictionary with one field per requested tag, keyed
    by the tag name. Each field contains a list of tuples with the
    following parameters:
rating -- (INTEGER) the rating range being counted
count -- (INTEGER) the number of problems with that tag in that rating range"""
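    # Illustrative request (example values, derived from the docstring above):
    #   GET /tags_graph/dp,greedy
    # returns a dictionary such as {"dp": [[800, 120], ...], "greedy": [...]},
    # where each pair is (rating, number of problems with that tag at that rating).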
received_tags = received_tags.split(',')
print("received args:", received_tags)
tags = {}
for tag in received_tags:
args = {}
query = """SELECT problems.rating, COUNT(problems.rating) FROM tags, problem_tags, problems
WHERE tags.name = %(tag)s
AND tags.id = problem_tags.tag_id
AND problem_tags.problem_id = problems.problem_id
GROUP BY problems.rating
ORDER BY problems.rating"""
args["tag"] = tag
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, args)
tags[tag] = []
for element in list(cursor):
if(element[0]): # I don't want the problems that have no rating (which is represented as 0)
tags[tag].append((element[0], element[1]))
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(tags)
@api.route('/tag_names')
def get_tag_names():
"""REQUEST: /tag_names
RESPONSE: a JSON list of TEXT with all the tag names"""
query = """SELECT name FROM tags"""
tags = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query,)
tags = []
for tag in list(cursor):
tags.append(tag[0])
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
return json.dumps(tags)
@api.route('/tags_intersection/<received_tags>')
def get_tags_intersection(received_tags):
"""REQUEST: /tags_intersection/<tags>
tags (Required) -- returns the information for the problems that
contain all tags. The input is a list of tags separated by commas
    RESPONSE: a JSON list of tuples, each of which represents a rating range,
    sorted increasingly by rating. Each tuple will have the following fields:
rating -- (INTEGER) the beginning of the rating range (all problems ratings' are multiples of 100)
problem_count -- (INTEGER) the count of problems in that range
with those tags
solved_count -- (INTEGER) the count of solutions of problems
in that range with those tags"""
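    # Illustrative request (example values, derived from the docstring above):
    #   GET /tags_intersection/dp,graphs
    # returns a list of (rating, problem_count, solved_count) triples for problems
    # tagged with both "dp" and "graphs".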
received_tags = received_tags.split(',')
query = """SELECT problems.rating, problems.solved_count
FROM problems, problem_tags, tags
WHERE problem_tags.tag_id = tags.id AND problems.problem_id = problem_tags.problem_id
AND tags.name IN (
"""
# match only problems with all of the given tags
query += ','.join(['%s' for i in range(len(received_tags))])
query += """) GROUP BY (problems.problem_id, problems.rating, problems.solved_count) HAVING COUNT(problems.problem_id) = %s"""
ratingCount = defaultdict(int)
solvedCount = defaultdict(int)
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, received_tags + [len(received_tags)])
for element in list(cursor):
rating = element[0]
solves = element[1]
if not rating:
continue
ratingCount[rating] += 1
solvedCount[rating] += solves
cursor.close()
connection.close()
except Exception as e:
traceback.print_exc()
# return a list of (rating, count at that rating, solutions at that rating) of problems with these tags
return json.dumps(sorted([(rating, ratingCount[rating], solvedCount[rating]) for rating in ratingCount]))
|
LucaDantas/cs257
|
webapp/api.py
|
api.py
|
py
| 13,186 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "config.database",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "config.user",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "config.password",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.send_file",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "psycopg2.extensions.AsIs",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "psycopg2.extensions",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "traceback.print_exc",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 377,
"usage_type": "call"
}
] |
10062234348
|
import dataclasses
import logging
import typing
import httpx
from sequoia.exceptions import DiscoveryResourcesError, DiscoveryServicesError, ResourceNotFound, ServiceNotFound
logger = logging.getLogger(__name__)
__all__ = ["Resource", "ResourcesRegistry", "Service", "ServicesRegistry"]
@dataclasses.dataclass
class Resource:
"""
Representation of a resource part of a Sequoia service.
"""
name: str
path: str
class ResourcesRegistry(dict):
"""
Mapping of available resources by name.
"""
def __getitem__(self, key: str) -> Resource:
try:
return super().__getitem__(key)
except KeyError:
raise ResourceNotFound(key)
@dataclasses.dataclass
class Service:
"""
Representation of a Sequoia service.
"""
name: str
url: str
title: typing.Optional[str] = dataclasses.field(default=None, hash=False, compare=False)
description: typing.Optional[str] = dataclasses.field(default=None, hash=False, compare=False)
async def discover(self):
"""
Request a service description endpoint to discover its resources and metadata.
"""
async with httpx.AsyncClient(timeout=60) as client:
try:
response = await client.get(f"{self.url}/descriptor/raw/")
response.raise_for_status()
response = response.json()
self.title = response["title"]
self.description = response["description"]
self._resources = ResourcesRegistry(
{
i["hyphenatedPluralName"].replace("-", "_"): Resource(
name=i["pluralName"], path=f"{i['path']}/{i['hyphenatedPluralName']}"
)
for i in response["resourcefuls"].values()
}
)
except KeyError:
logger.exception("Wrong response retrieving description of service '%s': %s", self.name, str(response))
raise DiscoveryResourcesError(service=self.name)
except (httpx.exceptions.HTTPError, OSError):
raise DiscoveryResourcesError(service=self.name)
@property
async def resources(self) -> ResourcesRegistry:
"""
Return the registry containing all the resources that are part of this service. This registry will be loaded
        when requested, following a lazy-loading pattern.
:return: Resources registry.
"""
if not hasattr(self, "_resources"):
await self.discover()
return self._resources
class ServicesRegistry(dict):
"""
Mapping of available services by name.
"""
def __getitem__(self, item):
try:
value = super().__getitem__(item)
except KeyError:
raise ServiceNotFound(item)
return value
async def discover(self, registry_url: str, owner: typing.Optional[str] = None):
"""
Request Registry service to update the list of all available services.
:return: Services registry.
"""
async with httpx.AsyncClient(timeout=60) as client:
try:
response = await client.get(f"{registry_url}/services/{owner or 'root'}/")
response.raise_for_status()
response = response.json()
self.clear()
self.update(
sorted(
{i["name"]: Service(name=i["name"], url=i["location"]) for i in response["services"]}.items()
)
)
except KeyError:
logger.exception("Wrong response retrieving list of services from 'registry': %s", str(response))
raise DiscoveryServicesError()
except (httpx.exceptions.HTTPError, OSError):
raise DiscoveryServicesError()
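# Illustrative usage sketch (assumed service name and registry URL; real values
# depend on the deployment):
#
#     import asyncio
#
#     async def main():
#         registry = ServicesRegistry()
#         await registry.discover("https://registry.example.com")
#         service = registry["metadata"]
#         print(await service.resources)
#
#     asyncio.run(main())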
|
pikselpalette/sequoia-python-client-sdk-async
|
sequoia/types.py
|
types.py
|
py
| 3,925 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sequoia.exceptions.ResourceNotFound",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.field",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.field",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "httpx.AsyncClient",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sequoia.exceptions.DiscoveryResourcesError",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "httpx.exceptions",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "sequoia.exceptions.DiscoveryResourcesError",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sequoia.exceptions.ServiceNotFound",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "httpx.AsyncClient",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "sequoia.exceptions.DiscoveryServicesError",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "httpx.exceptions",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "sequoia.exceptions.DiscoveryServicesError",
"line_number": 122,
"usage_type": "call"
}
] |
30578251026
|
import logging
import itertools
import math
import sys
import collections
import datetime
import shutil
import click
from .status import JobStatus, JOB_EVENT_STATUS_TRANSITIONS
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
SYMBOLS = [" ", "I", "R", "X", "C", "H", "S"]
STATUS_TO_SYMBOL = dict(zip(JobStatus, SYMBOLS))
COLORS = ["black", "yellow", "blue", "magenta", "green", "red", "magenta"]
SYMBOL_TO_COLOR = dict(zip(SYMBOLS, COLORS))
def make_state_graph(events):
job_states = {}
job_state_counts = collections.Counter()
counts_over_time = []
for event in sorted(events, key=lambda e: e.timestamp):
event_key = (event.cluster, event.proc)
new_status = JOB_EVENT_STATUS_TRANSITIONS.get(event.type, None)
if new_status is not None:
old_status = job_states.get(event_key, None)
job_states[event_key] = new_status
job_state_counts[new_status] += 1
if old_status is not None:
job_state_counts[old_status] -= 1
counts_over_time.append((event.timestamp, job_state_counts.copy()))
term = shutil.get_terminal_size((80, 20))
width = term.columns - 10
height = term.lines - 10
graph = make_bars(counts_over_time, width, height)
rows = ["│" + row for row in graph.splitlines()]
rows.append("└" + ("─" * (width)))
first_time, _ = counts_over_time[0]
last_time, _ = counts_over_time[-1]
left_date_str = (
datetime.datetime.fromtimestamp(first_time)
.strftime("%y-%m-%d %H:%M:%S")
.ljust(width + 1)
)
right_date_str = (
datetime.datetime.fromtimestamp(last_time)
.strftime("%y-%m-%d %H:%M:%S")
.rjust(width + 1)
)
time_str = "Time".center(width + 1)
rows.append(merge_strings(left_date_str, right_date_str, time_str))
max_jobs = max(total_counts(c) for _, c in counts_over_time)
extra_len = max(len(str(max_jobs)), len("# Jobs"))
new_rows = []
for idx, row in enumerate(rows):
if idx == 0:
new_rows.append(str(max_jobs).rjust(extra_len) + row)
elif idx == len(rows) - 2:
new_rows.append("0".rjust(extra_len) + row)
elif idx == len(rows) // 2:
new_rows.append("# Jobs".rjust(extra_len) + row)
else:
new_rows.append((" " * extra_len) + row)
rows = new_rows
graph = "\n".join(rows)
return graph
def merge_strings(*strings):
max_len = max(len(s) for s in strings)
out = [" "] * max_len
for string in strings:
for idx, char in enumerate(string):
if out[idx] == " " and char != " ":
out[idx] = char
return "".join(out)
def make_bars(counts_over_time, width, height):
first_time, _ = counts_over_time[0]
last_time, last_counts = counts_over_time[-1]
groups = list(group_counts_by_time(counts_over_time, width))
counts = [avg_counts(group) for group in groups]
counts[0] = groups[0][-1][1]
counts[-1] = last_counts
max_jobs = max(total_counts(c) for c in counts if c is not None)
columns = []
for count in counts:
if count is None:
columns.append(columns[-1])
continue
bar_lens = calculate_column_partition(count, max_jobs, height)
columns.append(
"".join(
symbol * bar_lens[status] for status, symbol in STATUS_TO_SYMBOL.items()
)
)
rows = list(
reversed(list(map(list, itertools.zip_longest(*columns, fillvalue=" "))))
)
rows = [
"".join(
click.style("█" * len(list(group)), fg=SYMBOL_TO_COLOR[symbol])
for symbol, group in itertools.groupby(row)
)
for row in rows
]
return "\n".join(rows)
def calculate_column_partition(counts, max_jobs, height):
raw_split = [(counts.get(status, 0) / max_jobs) * height for status in JobStatus]
int_split = [0 for _ in range(len(raw_split))]
carry = 0
for idx, entry in enumerate(raw_split):
dec = entry - math.floor(entry)
if entry == 0:
int_split[idx] = 0
elif dec >= 0.5:
int_split[idx] = math.ceil(entry)
elif math.floor(entry) == 0:
int_split[idx] = 1
carry += 1
elif dec < 0.5:
int_split[idx] = math.floor(entry)
else:
raise Exception("Unreachable")
int_split[int_split.index(max(int_split))] -= carry
return {k: v for k, v in zip(JobStatus, int_split)}
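# Illustrative example: with max_jobs=4 and height=8, a status counting 3 jobs is
# allotted 6 of the 8 character cells of a column and a status counting 1 job is
# allotted 2; statuses with no jobs get no cells.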
def _calculate_bar_component_len(count, total, bar_width):
if count == 0:
return 0
return max(int((count / total) * bar_width), 1)
def total_counts(counter):
return sum(counter.values())
def group_counts_by_time(counts_over_time, n_divisions):
first_time, _ = counts_over_time[0]
last_time, _ = counts_over_time[-1]
dt = (last_time - first_time) / n_divisions
left_idx = 0
right_idx = 0
for left_time in (first_time + (n * dt) for n in range(n_divisions)):
right_time = left_time + dt
for right_idx, (timestamp, _) in enumerate(
counts_over_time[left_idx:], start=left_idx
):
if timestamp > right_time:
break
yield counts_over_time[left_idx:right_idx]
left_idx = right_idx
def avg_counts(counts_over_time):
lc = len(counts_over_time)
if lc == 0:
return None
counts = [counts for _, counts in counts_over_time]
return collections.Counter(
{k: v / lc for k, v in sum(counts, collections.Counter()).items()}
)
if __name__ == "__main__":
make_state_graph(sys.argv[1])
|
JoshKarpel/condor_necropsy
|
condor_necropsy/state_graph.py
|
state_graph.py
|
py
| 5,724 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "status.JobStatus",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "collections.Counter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "status.JOB_EVENT_STATUS_TRANSITIONS.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "status.JOB_EVENT_STATUS_TRANSITIONS",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "shutil.get_terminal_size",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "itertools.zip_longest",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "click.style",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "status.JobStatus",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "math.floor",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "status.JobStatus",
"line_number": 164,
"usage_type": "argument"
},
{
"api_name": "collections.Counter",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 212,
"usage_type": "attribute"
}
] |
39830489884
|
from collections import deque
def solution(n, info):
answer = []
diff = 0
queue = deque()
queue.append((0, [0,0,0,0,0,0,0,0,0,0,0]))
while queue:
idx, arr = queue.popleft()
        # If all arrows have been fired
if sum(arr) == n:
            # Tally Apeach's and Lion's scores
apeach, lion = 0, 0
for i in range(11):
if info[i] == arr[i] == 0:
continue
if info[i] >= arr[i]:
apeach += 10 - i
else:
lion += 10 - i
            # If Lion's score is higher than Apeach's
if apeach < lion:
curr_diff = lion - apeach
                # If the score gap is smaller than the current best, skip
if diff > curr_diff:
continue
                # If the score gap is larger than the current best
if diff < curr_diff:
                    # Update the best score gap
diff = curr_diff
                    # Reset the result list
answer.clear()
                # Add this arrow distribution to the result list
answer.append(arr)
        # If more arrows were fired than allowed, skip
elif sum(arr) > n:
continue
        # If the current index is the 0-point target
elif idx == 10:
temp = arr.copy()
            # Fire all remaining arrows at the 0-point target
temp[idx] = n - sum(arr)
queue.append((-1, temp))
else:
            # Hit this target more times than Apeach
temp = arr.copy()
temp[idx] = info[idx] + 1
queue.append((idx + 1, temp))
            # Do not hit this target
temp2 = arr.copy()
temp2[idx] = 0
queue.append((idx + 1, temp2))
return answer[-1] if answer else [-1]
print(solution(5, [2,1,1,1,0,0,0,0,0,0,0]))
print(solution(1, [1,0,0,0,0,0,0,0,0,0,0]))
print(solution(9, [0,0,1,2,0,1,1,1,1,1,1]))
print(solution(10, [0,0,0,0,0,0,0,0,3,4,3]))
|
omg7152/CodingTestPractice
|
kakaoBlindRecruitment2022/Q4.py
|
Q4.py
|
py
| 2,081 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
21160883826
|
import os
import math
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
import torch.nn as nn
from numpy import sqrt, argmax
from torch.optim import lr_scheduler
from .model import CNN
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, confusion_matrix, roc_auc_score
from matplotlib import pyplot
from backbone.vit_pytorch import cait, vit, deepvit
from backbone.torchvision.models_orig import resnet, densenet, inception
from factory.config import *
class Model(pl.LightningModule):
def __init__(self, model_name):
super().__init__()
self.model_name = model_name
# efficientnet-b0 ~ efficientnet-b5
if model_name == 'efficientnet-b0':
self.net = CNN(backbone="efficientnet-b0", freeze=False)
if model_name == 'efficientnet-b1':
self.net = CNN(backbone="efficientnet-b1", freeze=False)
        if model_name == 'efficientnet-b2':
            self.net = CNN(backbone="efficientnet-b2", freeze=False)
        if model_name == 'efficientnet-b3':
            self.net = CNN(backbone="efficientnet-b3", freeze=False)
if model_name == 'efficientnet-b4':
self.net = CNN(backbone="efficientnet-b4", freeze=False)
if model_name == 'efficientnet-b5':
self.net = CNN(backbone="efficientnet-b5", freeze=False)
#naive vit
elif model_name == 'vit':
self.net = vit.ViT(image_size=IMG_SIZE , patch_size=32, num_classes=2, dim=1024, depth=6, heads=16, mlp_dim=2048, dropout=0.1, emb_dropout=0.1)
#Cait
elif model_name == 'cait':
self.net = cait.CaiT(image_size=IMG_SIZE, patch_size=32, num_classes=2, dim=1024, depth=12, cls_depth=2, heads=16, mlp_dim=2048, dropout=0.1, emb_dropout=0.1, layer_dropout=0.05)
#deep vit
elif model_name == 'deepvit':
self.net = deepvit.DeepViT(image_size=IMG_SIZE, patch_size=32, num_classes=2, dim=1024, depth=6, heads=16, mlp_dim=2048, dropout=0.1, emb_dropout=0.1)
#resnet50
elif model_name == 'resnet50':
self.net = resnet.resnet50(pretrained=True)
#resnet101
elif model_name == 'resnet101':
self.net = resnet.resnet101(pretrained=True)
#resnet152
elif model_name == 'resnet152':
self.net = resnet.resnet152(pretrained=True)
#densenet121
elif model_name == 'densenet121':
self.net = densenet.densenet121(pretrained=True)
#densenet161
elif model_name == 'densenet161':
self.net = densenet.densenet161(pretrained=True)
#densenet169
elif model_name == 'densenet169':
self.net = densenet.densenet169(pretrained=True)
#densenet201
elif model_name == 'densenet201':
self.net = densenet.densenet201(pretrained=True)
#inception_v3
elif model_name == 'inception_v3':
self.net = inception.inception_v3(pretrained=True)
hidden_dim1 = 256
hidden_dim2 = 64
num_classes = 2
dropout = 0.1
self.classifier = nn.Sequential(
nn.Linear(1000, hidden_dim1),
nn.GELU(), nn.Dropout(dropout),
nn.Linear(hidden_dim1, hidden_dim2),
nn.GELU(), nn.Dropout(dropout),
nn.Linear(hidden_dim2, num_classes)
)
self.train_preds = []
self.train_gts = []
self.valid_preds = []
self.valid_gts = []
self.test_preds = []
self.test_probs = []
self.test_gts = []
def forward(self, x):
if 'efficientnet' in self.model_name:
return self.net(x)
elif 'inception' in self.model_name:
x = self.net(x)
return self.classifier(x.logits)
else:
x = self.net(x)
return self.classifier(x)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)
scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
for gy in y:
self.train_gts.append(gy.cpu().item())
for py in y_hat:
c = torch.argmax(py)
self.train_preds.append(c.cpu().item())
self.log("loss", loss, on_epoch=True, prog_bar=True)
return loss
def training_epoch_end(self, outputs):
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.train_gts, self.train_preds
)
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log("train_avg_loss", avg_loss, on_epoch=True, prog_bar=True)
self.log("train_acc", acc, on_epoch=True, prog_bar=True)
self.log("train_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("train_specificity", spe, on_epoch=True, prog_bar=True)
self.log("train_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("train_npv", npv, on_epoch=True, prog_bar=True)
self.log("train_tn", tn , on_epoch=True, prog_bar=True)
self.log("train_fp", fp, on_epoch=True, prog_bar=True)
self.log("train_fn", fn, on_epoch=True, prog_bar=True)
self.log("train_tp", tp, on_epoch=True, prog_bar=True)
self.train_preds = []
self.train_gts = []
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
for gy in y:
self.valid_gts.append(gy.cpu().item())
for py in y_hat:
c = torch.argmax(py)
self.valid_preds.append(c.cpu().item())
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.valid_gts, self.valid_preds
)
self.log("val_bat_loss", loss, on_epoch=True, prog_bar=True)
self.log("val_acc", acc, on_epoch=True, prog_bar=True)
self.log("val_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("val_specificity", spe, on_epoch=True, prog_bar=True)
self.log("val_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("val_npv", npv, on_epoch=True, prog_bar=True)
self.log("val_tn", tn , on_epoch=True, prog_bar=True)
self.log("val_fp", fp, on_epoch=True, prog_bar=True)
self.log("val_fn", fn, on_epoch=True, prog_bar=True)
self.log("val_tp", tp, on_epoch=True, prog_bar=True)
return {
"val_bat_loss": loss, "val_acc": acc,
"val_sensitivity(recall)": sen, "val_specificity": spe,
"val_ppv(precision)":ppv, "val_npv": npv,
"val_tn": tn, "val_fp": fp, "val_fn": fn, "val_tp": tp,
}
def validation_epoch_end(self, outputs):
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.valid_gts, self.valid_preds
)
avg_loss = torch.stack([x['val_bat_loss'] for x in outputs]).mean()
self.log("val_avg_loss", avg_loss, on_epoch=True, prog_bar=True)
self.log("val_acc", acc, on_epoch=True, prog_bar=True)
self.log("val_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("val_specificity", spe, on_epoch=True, prog_bar=True)
self.log("val_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("val_npv", npv, on_epoch=True, prog_bar=True)
self.log("val_tn", tn , on_epoch=True, prog_bar=True)
self.log("val_fp", fp, on_epoch=True, prog_bar=True)
self.log("val_fn", fn, on_epoch=True, prog_bar=True)
self.log("val_tp", tp, on_epoch=True, prog_bar=True)
self.valid_preds = []
self.valid_gts = []
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self.forward(x)
loss = F.cross_entropy(y_hat, y)
for gy in y:
self.test_gts.append(gy.cpu().item())
for py in y_hat:
c = torch.argmax(py)
p = F.softmax(py, dim=0)[1]
self.test_probs.append(p.cpu().item())
self.test_preds.append(c.cpu().item())
self.log("test_loss", loss, on_epoch=True, prog_bar=True)
return {'test_loss': loss}
def test_epoch_end(self, outputs):
acc, sen, spe, ppv, npv, tn, fp, fn, tp = self.calculate_metrics(
self.test_gts, self.test_preds
)
auc = self.calculate_auc(self.test_gts, self.test_probs)
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
self.log("test_avg_loss", avg_loss, on_epoch=True, prog_bar=True)
self.log("test_acc", acc, on_epoch=True, prog_bar=True)
self.log("test_sensitivity(recall)", sen, on_epoch=True, prog_bar=True)
self.log("test_specificity", spe, on_epoch=True, prog_bar=True)
self.log("test_ppv(precision)", ppv, on_epoch=True, prog_bar=True)
self.log("test_npv", npv, on_epoch=True, prog_bar=True)
self.log("test_auc", auc, on_epoch=True, prog_bar=True)
self.log("test_tn", tn , on_epoch=True, prog_bar=True)
self.log("test_fp", fp, on_epoch=True, prog_bar=True)
self.log("test_fn", fn, on_epoch=True, prog_bar=True)
self.log("test_tp", tp, on_epoch=True, prog_bar=True)
print('============' * 5)
print('Accuracy : {:.4f}, Recall(Sensitivity) : {:.4f}, Specificity :{:.4f}, PPV(Precision) : {:.4f}, NPV : {:.4f}, Auc : {:.4f}, Confusion : ( TP-{} | FP-{} | FN-{} | TN-{} )'.format(acc, sen, spe, ppv, npv, auc, tp, fp, fn, tn))
print('============' * 5)
dfGTs = pd.DataFrame(np.round_(np.array(self.test_gts)))
dfPreds = pd.DataFrame(np.round_(np.array(self.test_preds)))
dfProbs = pd.DataFrame(np.round_(np.array(self.test_probs) * 100, 3))
pd.concat([dfGTs, dfPreds, dfProbs], axis=1).to_csv('./test.csv', index=False)
def calculate_metrics(self, gts, preds):
tn, fp, fn, tp = confusion_matrix(gts, preds, labels=[0,1]).ravel()
if math.isnan(tn): tn = 0
if math.isnan(fp): fp = 0
if math.isnan(fn): fn = 0
if math.isnan(tp): tp = 0
acc = (tp + tn) / (tn + fp + fn + tp)
sen = tp / (tp + fn)
spe = tn / (tn + fp)
ppv = tp / (tp + fp)
npv = tn / (tn + fn)
if math.isnan(acc): acc = 0
if math.isnan(sen): sen = 0
if math.isnan(spe): spe = 0
if math.isnan(ppv): ppv = 0
if math.isnan(npv): npv = 0
return np.float32(acc), np.float32(sen), np.float32(spe), np.float32(ppv), np.float32(npv), tn, fp, fn, tp
def calculate_auc(self, gts, probs):
try:
auc = roc_auc_score(gts, probs)
ns_probs = [0 for _ in range(len(gts))]
lr_probs = probs
ns_auc = roc_auc_score(gts, ns_probs)
lr_auc = roc_auc_score(gts, lr_probs)
ns_fpr, ns_tpr, _ = roc_curve(gts, ns_probs)
lr_fpr, lr_tpr, _ = roc_curve(gts, lr_probs)
# calculate g-mean for each threshold
gmeans = sqrt(lr_tpr * (1-lr_fpr))
ix = argmax(gmeans)
# plot True, Predict, Best
pyplot.scatter(lr_fpr[ix], lr_tpr[ix], marker='*', color='black', label='Best')
pyplot.text(lr_fpr[ix] + 0.05, lr_tpr[ix] - 0.05, "FPR: {}\nTPR: {}".format(lr_fpr[ix], lr_tpr[ix]), fontsize=7)
pyplot.plot(ns_fpr, ns_tpr, linestyle='--', label='True')
pyplot.plot(lr_fpr, lr_tpr, marker=',', label='Predict (auc={})'.format(round(auc, 3)))
pyplot.xlabel('False Positive Rate (1 - Specificity)')
pyplot.ylabel('True Positive Rate (Sensitivity)')
pyplot.legend()
pyplot.savefig('test_roc.png', dpi=600)
except ValueError:
auc=0
return auc
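# Illustrative usage sketch (assumes DataLoaders and a pytorch_lightning Trainer
# configured elsewhere; argument values are placeholders):
#
#     model = Model(model_name='resnet50')
#     trainer = pl.Trainer(max_epochs=10, gpus=1)
#     trainer.fit(model, train_dataloader, val_dataloader)
#     trainer.test(model, test_dataloader)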
|
Junkkkk/ovarian_cancer_detection
|
models/lightning_model.py
|
lightning_model.py
|
py
| 12,263 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "model.CNN",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "model.CNN",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "model.CNN",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "model.CNN",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "model.CNN",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "model.CNN",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "backbone.vit_pytorch.vit.ViT",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "backbone.vit_pytorch.vit",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "backbone.vit_pytorch.cait.CaiT",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "backbone.vit_pytorch.cait",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "backbone.vit_pytorch.deepvit.DeepViT",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "backbone.vit_pytorch.deepvit",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.resnet.resnet50",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.resnet",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.resnet.resnet101",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.resnet",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.resnet.resnet152",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.resnet",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.densenet.densenet121",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.densenet",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.densenet.densenet161",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.densenet",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.densenet.densenet169",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.densenet",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.densenet.densenet201",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.densenet",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "backbone.torchvision.models_orig.inception.inception_v3",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "backbone.torchvision.models_orig.inception",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "torch.nn.GELU",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "torch.nn.GELU",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.StepLR",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.optim.lr_scheduler",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.argmax",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "torch.argmax",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "torch.argmax",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "torch.stack",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "numpy.round_",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.round_",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.round_",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_curve",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 309,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 314,
"usage_type": "name"
}
] |
32059806236
|
#!/usr/bin/env python3
"""
led.py
Notes
-----
- Docstrings follow the numpydoc style:
https://numpydoc.readthedocs.io/en/latest/format.html
- Code follows the PEP 8 style guide:
https://www.python.org/dev/peps/pep-0008/
"""
import RPi.GPIO as GPIO
from time import sleep
import logging
import constants as c
LED_OUTPUT_PIN = 21
LED_TEST_TIME_SECS = 0.5
GPIO_WARNINGS_OFF = False
ON_STRING = 'ON'
OFF_STRING = 'OFF'
class Led:
"""
Class to represent Led actuator
Attributes
----------
__pin : int
BCM GPIO pin number
__led_on : bool
True if LED on
Methods
-------
    get_status()
        Returns the status of the LED
    set_status(status)
        Sets the LED status
invert_status()
Inverts the status of the LED
"""
def __init__(self, pin=LED_OUTPUT_PIN):
"""
Initializes the Led
Parameters
----------
pin : int
BCM GPIO pin number
"""
self.__pin = pin
self.__led_on = c.LED_OFF
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(GPIO_WARNINGS_OFF)
GPIO.setup(self.__pin, GPIO.OUT)
def get_status(self):
"""
Returns
-------
self.__led_on : bool
True if LED on
"""
return self.__led_on
def set_status(self, status):
"""
Sets the LED status
Parameters
----------
status : bool
True if LED on, False if LED off
"""
output_gpio = None
output_string = ''
if status == c.LED_ON:
output_gpio = GPIO.HIGH
output_string = ON_STRING
else:
output_gpio = GPIO.LOW
output_string = OFF_STRING
GPIO.output(self.__pin, output_gpio)
self.__led_on = status
logging.debug('LED status updated to {}'.format(output_string))
def invert_status(self):
"""
Inverts the LED status
- If LED status on, turn off LED
- If LED status off, turn on LED
Returns
-------
self.__led_on : bool
True if LED on
"""
self.set_status(not self.__led_on)
return self.__led_on
def led_test():
"""
Creates an Led object for manual LED verification
"""
led = Led()
led.set_status(c.LED_ON)
sleep(LED_TEST_TIME_SECS)
led.set_status(c.LED_OFF)
GPIO.cleanup()
if __name__ == '__main__':
logging.basicConfig(format=c.LOGGING_FORMAT, level=c.LOGGING_DEFAULT_LEVEL)
led_test()
|
Hasan-Baig/SYSC3010_Home_Pixel
|
lightclapper/led.py
|
led.py
|
py
| 2,584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "constants.LED_OFF",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setmode",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BCM",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setwarnings",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.setup",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.OUT",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "constants.LED_ON",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.HIGH",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.LOW",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.output",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "constants.LED_ON",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "constants.LED_OFF",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.cleanup",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "constants.LOGGING_FORMAT",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "constants.LOGGING_DEFAULT_LEVEL",
"line_number": 121,
"usage_type": "attribute"
}
] |
40128251295
|
"""
http://www.sphinx-doc.org/en/stable/ext/doctest.html
https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/doctest.py
* TODO
** CLEANUP: use the sphinx directive parser from the sphinx project
"""
import doctest
import enum
import re
import sys
import textwrap
import traceback
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import _pytest.doctest
import pytest
from _pytest.config import Config
from _pytest.doctest import DoctestItem
from _pytest.doctest import _is_mocked
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.main import Session
from _pytest.pathlib import import_path
from _pytest.python import Package
if TYPE_CHECKING:
import io
import pdb
from doctest import _Out
_SpoofOut = io.StringIO
class SphinxDoctestDirectives(enum.Enum):
TESTCODE = 1
TESTOUTPUT = 2
TESTSETUP = 3
TESTCLEANUP = 4
DOCTEST = 5
_DIRECTIVES_W_OPTIONS = (
SphinxDoctestDirectives.TESTOUTPUT,
SphinxDoctestDirectives.DOCTEST,
)
_DIRECTIVES_W_SKIPIF = (
SphinxDoctestDirectives.TESTCODE,
SphinxDoctestDirectives.TESTOUTPUT,
SphinxDoctestDirectives.TESTSETUP,
SphinxDoctestDirectives.TESTCLEANUP,
SphinxDoctestDirectives.DOCTEST,
)
def pytest_collect_file(
file_path: Path, parent: Union[Session, Package]
) -> Optional[Union["SphinxDoctestModule", "SphinxDoctestTextfile"]]:
config = parent.config
if file_path.suffix == ".py":
if config.option.doctestmodules:
mod: Union[
"SphinxDoctestModule", "SphinxDoctestTextfile"
] = SphinxDoctestModule.from_parent(parent, path=file_path)
return mod
elif _is_doctest(config, file_path, parent):
return SphinxDoctestTextfile.from_parent(parent, path=file_path) # type: ignore
return None
GlobDict = Dict[str, Any]
def _is_doctest(config: Config, path: Path, parent: Union[Session, Package]) -> bool:
if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
assert isinstance(globs, list)
for glob in globs:
if path.match(path_pattern=glob):
return True
return False
# This regular expression looks for option directives in the expected output
# (testoutput) code of an example. Option directives are comments starting
# with ":options:".
_OPTION_DIRECTIVE_RE = re.compile(r':options:\s*([^\n\'"]*)$')
_OPTION_SKIPIF_RE = re.compile(r':skipif:\s*([^\n\'"]*)$')
_DIRECTIVE_RE = re.compile(
r"""
\s*\.\.\s
(?P<directive>(testcode|testoutput|testsetup|testcleanup|doctest))
::\s*
(?P<argument>([^\n'"]*))
$
""",
re.VERBOSE,
)
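# Illustrative example: a line such as "   .. testcode:: group1" matches
# _DIRECTIVE_RE with directive == "testcode" and argument == "group1".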
def _split_into_body_and_options(
section_content: str,
) -> Tuple[str, Optional[str], Dict[int, bool]]:
"""Parse the the full content of a directive and split it.
It is split into a string, where the options (:options:, :hide: and
:skipif:) are removed, and into options.
If there are options in `section_content`, they have to appear at the
very beginning. The first line that is not an option (:options:, :hide:
and :skipif:) and not a newline is the first line of the string that is
returned (`remaining`).
Parameters
----------
section_content : str
String consisting of optional options (:skipif:, :hide:
or :options:), and of a body.
Returns
-------
body : str
skipif_expr : str or None
flag_settings : dict
Raises
------
ValueError
* If options and the body of the section are not
separated by a newline.
* If the body of the section is empty.
"""
lines = section_content.strip().splitlines()
skipif_expr = None
flag_settings = {}
i = 0
for line in lines:
stripped = line.strip()
if _OPTION_SKIPIF_RE.match(stripped):
skipif_match = _OPTION_SKIPIF_RE.match(stripped)
assert skipif_match is not None
skipif_expr = skipif_match.group(1)
i += 1
elif _OPTION_DIRECTIVE_RE.match(stripped):
directive_match = _OPTION_DIRECTIVE_RE.match(stripped)
assert directive_match is not None
option_strings = directive_match.group(1).replace(",", " ").split()
for option in option_strings:
if (
option[0] not in "+-"
or option[1:] not in doctest.OPTIONFLAGS_BY_NAME
):
raise ValueError(f"doctest has an invalid option {option}")
flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]]
flag_settings[flag] = option[0] == "+"
i += 1
elif stripped == ":hide:":
i += 1
else:
break
if i == len(lines):
raise ValueError("no code/output")
body = "\n".join(lines[i:]).lstrip()
if not body:
raise ValueError("no code/output")
if i and lines[i].strip():
# no newline between option block and body
raise ValueError(f"invalid option block: {section_content!r}")
return body, skipif_expr, flag_settings
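# Illustrative example: _split_into_body_and_options(":hide:\n\nprint(1)") returns
# ("print(1)", None, {}) -- the :hide: option is consumed, there is no :skipif:
# expression, and no doctest option flags are set.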
def _get_next_textoutputsections(
sections: List["Section"], index: int
) -> Iterator["Section"]:
"""Yield successive TESTOUTPUT sections."""
for j in range(index, len(sections)):
section = sections[j]
if section.directive == SphinxDoctestDirectives.TESTOUTPUT:
yield section
else:
break
SectionGroups = Optional[List[str]]
class Section:
def __init__(
self,
directive: SphinxDoctestDirectives,
content: str,
lineno: int,
groups: SectionGroups = None,
) -> None:
super().__init__()
self.directive = directive
self.groups = groups
self.lineno = lineno
body, skipif_expr, options = _split_into_body_and_options(content)
if skipif_expr and self.directive not in _DIRECTIVES_W_SKIPIF:
raise ValueError(f":skipif: not allowed in {self.directive}")
if options and self.directive not in _DIRECTIVES_W_OPTIONS:
raise ValueError(f":options: not allowed in {self.directive}")
self.body = body
self.skipif_expr = skipif_expr
self.options = options
def get_sections(docstring: str) -> List[Union[Any, Section]]:
lines = textwrap.dedent(docstring).splitlines()
sections = []
def _get_indentation(line: str) -> int:
return len(line) - len(line.lstrip())
def add_match(
directive: SphinxDoctestDirectives, i: int, j: int, groups: SectionGroups
) -> None:
sections.append(
Section(
directive,
textwrap.dedent("\n".join(lines[i + 1 : j])),
lineno=j - 1,
groups=groups,
)
)
i = 0
while True:
try:
line = lines[i]
except IndexError:
break
match = _DIRECTIVE_RE.match(line)
if match:
group = match.groupdict()
directive = getattr(SphinxDoctestDirectives, group["directive"].upper())
groups = [x.strip() for x in (group["argument"] or "default").split(",")]
indentation = _get_indentation(line)
# find the end of the block
j = i
while True:
j += 1
try:
block_line = lines[j]
except IndexError:
add_match(directive, i, j, groups)
break
if block_line.lstrip() and _get_indentation(block_line) <= indentation:
add_match(directive, i, j, groups)
i = j - 1
break
i += 1
return sections
def docstring2examples(
docstring: str, globs: Optional[GlobDict] = None
) -> List[Union[Any, doctest.Example]]:
"""
Parse all sphinx test directives in the docstring and create a
list of examples.
"""
# TODO subclass doctest.DocTestParser instead?
if globs is None:
globs = {}
sections = get_sections(docstring)
def get_testoutput_section_data(
section: "Section",
) -> Tuple[str, Dict[int, bool], int, Optional[Any]]:
want = section.body
exc_msg = None
options: Dict[int, bool] = {}
if section.skipif_expr and eval(section.skipif_expr, globs):
want = ""
else:
options = section.options
match = doctest.DocTestParser._EXCEPTION_RE.match(want) # type: ignore
if match:
exc_msg = match.group("msg")
return want, options, section.lineno, exc_msg
examples = []
for i, current_section in enumerate(sections):
# TODO support SphinxDoctestDirectives.TESTSETUP, ...
if current_section.directive == SphinxDoctestDirectives.TESTCODE:
next_testoutput_sections = _get_next_textoutputsections(sections, i + 1)
section_data_seq = [
get_testoutput_section_data(s) for s in next_testoutput_sections
]
num_unskipped_sections = len([d for d in section_data_seq if d[0]])
if num_unskipped_sections > 1:
raise ValueError("There are multiple unskipped TESTOUTPUT sections")
if num_unskipped_sections:
want, options, _, exc_msg = next(d for d in section_data_seq if d[0])
else:
# no unskipped testoutput section
# do we really need doctest.Example to test
# independent TESTCODE sections?
want, options, exc_msg = "", {}, None
if current_section.skipif_expr and eval(current_section.skipif_expr, globs):
# TODO add the doctest.Example to `examples` but mark it as
# skipped.
continue
examples.append(
doctest.Example(
source=current_section.body,
want=want,
exc_msg=exc_msg,
# we want to see the ..testcode lines in the
# console output but not the ..testoutput
# lines
# TODO why do we want to hide testoutput??
lineno=current_section.lineno,
options=options,
)
)
return examples
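# Illustrative sketch (not part of the original module): a docstring containing
#
#     .. testcode::
#
#         print(2 + 2)
#
#     .. testoutput::
#
#         4
#
# would be turned by docstring2examples() into a single doctest.Example whose
# source is "print(2 + 2)" and whose expected output (`want`) is "4".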
class SphinxDocTestRunner(doctest.DebugRunner):
"""
Override doctest.DocTestRunner.__run, since the stdlib implementation passes
'single' to `compile` instead of 'exec'.
"""
_checker: "doctest.OutputChecker"
_fakeout: "_SpoofOut"
debugger: "pdb.Pdb"
def _DocTestRunner__run(
self, test: doctest.DocTest, compileflags: int, out: "_Out"
) -> doctest.TestResults:
"""
Run the examples in `test`.
Write the outcome of each example with one of the
`DocTestRunner.report_*` methods, using the writer function
`out`. `compileflags` is the set of compiler flags that should
be used to execute examples. Return a tuple `(f, t)`, where `t`
is the number of examples tried, and `f` is the number of
examples that failed. The examples are run in the namespace
`test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (
self.optionflags & doctest.REPORT_ONLY_FIRST_FAILURE and failures > 0
)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & doctest.SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = "<doctest %s[%d]>" % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec(
compile(example.source, filename, "exec", compileflags, 1),
test.globs,
)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except Exception:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_msg = traceback.format_exception_only(*exception[:2])[-1]
if not quiet:
got += doctest._exception_traceback(exception) # type:ignore
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & doctest.IGNORE_EXCEPTION_DETAIL:
if check(
doctest._strip_exception_details( # type:ignore
example.exc_msg,
),
doctest._strip_exception_details(exc_msg), # type:ignore
self.optionflags,
):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
assert exception is not None
assert out is not None
self.report_unexpected_exception(
out,
test,
example,
exception, # type:ignore
)
failures += 1
else:
assert False, ("unknown outcome", outcome)
if failures and self.optionflags & doctest.FAIL_FAST:
break
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self._DocTestRunner__record_outcome(test, failures, tries) # type:ignore
return doctest.TestResults(failures, tries)
class SphinxDocTestParser:
def get_doctest(
self,
docstring: str,
globs: Dict[str, Any],
name: str,
filename: str,
lineno: int,
) -> doctest.DocTest:
# TODO document why we need to overwrite? get_doctest
return doctest.DocTest(
examples=docstring2examples(docstring, globs=globs),
globs=globs,
name=name,
filename=filename,
lineno=lineno,
docstring=docstring,
)
class SphinxDoctestTextfile(pytest.Module):
obj = None
def collect(self) -> Iterator[_pytest.doctest.DoctestItem]:
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
name = self.fspath.basename
optionflags = _pytest.doctest.get_optionflags(self) # type:ignore
runner = SphinxDocTestRunner(
verbose=False,
optionflags=optionflags,
checker=_pytest.doctest._get_checker(),
)
test = doctest.DocTest(
examples=docstring2examples(text),
globs={},
name=name,
filename=name,
lineno=0,
docstring=text,
)
if test.examples:
yield DoctestItem.from_parent(
parent=self, # type:ignore
name=test.name,
runner=runner,
dtest=test,
)
class SphinxDoctestModule(pytest.Module):
def collect(self) -> Iterator[_pytest.doctest.DoctestItem]:
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
else:
try:
module = import_path(self.path, root=self.config.rootpath)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.path)
else:
raise
optionflags = _pytest.doctest.get_optionflags(self) # type:ignore
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""
a hackish doctest finder that overrides stdlib internals to fix
a stdlib bug
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
fix taken from https://github.com/pytest-dev/pytest/pull/4212/
"""
def _find(
self,
tests: List[doctest.DocTest],
obj: str,
name: str,
module: Any,
source_lines: Optional[List[str]],
globs: GlobDict,
seen: Dict[int, int],
) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
doctest.DocTestFinder._find( # type:ignore
self,
tests,
obj,
name,
module,
source_lines,
globs,
seen,
)
if sys.version_info < (3, 10):
finder = MockAwareDocTestFinder(
parser=SphinxDocTestParser() # type:ignore
)
else:
finder = doctest.DocTestFinder(parser=SphinxDocTestParser()) # type:ignore
runner = SphinxDocTestRunner(
verbose=False,
optionflags=optionflags,
checker=_pytest.doctest._get_checker(),
)
for test in finder.find(module, module.__name__):
if test.examples:
yield DoctestItem.from_parent(
parent=self, # type: ignore
name=test.name,
runner=runner,
dtest=test,
)
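# Illustrative sketch (not part of the original module): with the default
# --doctest-glob fallback of "test*.txt" used by _is_doctest above, a text file
# named e.g. test_example.txt containing the same .. testcode:: / .. testoutput::
# blocks would be collected as a SphinxDoctestTextfile and executed through
# SphinxDocTestRunner.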
|
thisch/pytest-sphinx
|
src/pytest_sphinx.py
|
pytest_sphinx.py
|
py
| 20,904 |
python
|
en
|
code
| 27 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "io.StringIO",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "enum.Enum",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "_pytest.main.Session",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "_pytest.python.Package",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "_pytest.config.Config",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "_pytest.main.Session",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "_pytest.python.Package",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "re.VERBOSE",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "doctest.OPTIONFLAGS_BY_NAME",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "doctest.OPTIONFLAGS_BY_NAME",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "textwrap.dedent",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "textwrap.dedent",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "doctest.DocTestParser._EXCEPTION_RE.match",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "doctest.DocTestParser",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "doctest.Example",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 276,
"usage_type": "name"
},
{
"api_name": "doctest.Example",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "doctest.DebugRunner",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "doctest.DocTest",
"line_number": 358,
"usage_type": "attribute"
},
{
"api_name": "doctest.REPORT_ONLY_FIRST_FAILURE",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "doctest.SKIP",
"line_number": 402,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 429,
"usage_type": "call"
},
{
"api_name": "traceback.format_exception_only",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "doctest._exception_traceback",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "doctest.IGNORE_EXCEPTION_DETAIL",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "doctest._strip_exception_details",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "doctest._strip_exception_details",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "doctest.FAIL_FAST",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "doctest.TestResults",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "doctest.TestResults",
"line_number": 359,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "doctest.DocTest",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "doctest.DocTest",
"line_number": 509,
"usage_type": "attribute"
},
{
"api_name": "pytest.Module",
"line_number": 521,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest.doctest.get_optionflags",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.doctest",
"line_number": 531,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest",
"line_number": 531,
"usage_type": "name"
},
{
"api_name": "_pytest.doctest.doctest._get_checker",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.doctest",
"line_number": 535,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest",
"line_number": 535,
"usage_type": "name"
},
{
"api_name": "doctest.DocTest",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.DoctestItem.from_parent",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.DoctestItem",
"line_number": 548,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 524,
"usage_type": "name"
},
{
"api_name": "_pytest.doctest.doctest",
"line_number": 524,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest",
"line_number": 524,
"usage_type": "name"
},
{
"api_name": "pytest.Module",
"line_number": 556,
"usage_type": "attribute"
},
{
"api_name": "_pytest.pathlib.import_path",
"line_number": 566,
"usage_type": "call"
},
{
"api_name": "pytest.skip",
"line_number": 569,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.doctest.get_optionflags",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.doctest",
"line_number": 572,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest",
"line_number": 572,
"usage_type": "name"
},
{
"api_name": "doctest.DocTestFinder",
"line_number": 574,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 586,
"usage_type": "name"
},
{
"api_name": "doctest.DocTest",
"line_number": 586,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 589,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 592,
"usage_type": "name"
},
{
"api_name": "_pytest.doctest._is_mocked",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest._patch_unwrap_mock_aware",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "doctest.DocTestFinder._find",
"line_number": 597,
"usage_type": "call"
},
{
"api_name": "doctest.DocTestFinder",
"line_number": 597,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 608,
"usage_type": "attribute"
},
{
"api_name": "doctest.DocTestFinder",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.doctest._get_checker",
"line_number": 618,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.doctest",
"line_number": 618,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest",
"line_number": 618,
"usage_type": "name"
},
{
"api_name": "_pytest.doctest.DoctestItem.from_parent",
"line_number": 623,
"usage_type": "call"
},
{
"api_name": "_pytest.doctest.DoctestItem",
"line_number": 623,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 557,
"usage_type": "name"
},
{
"api_name": "_pytest.doctest.doctest",
"line_number": 557,
"usage_type": "attribute"
},
{
"api_name": "_pytest.doctest",
"line_number": 557,
"usage_type": "name"
}
] |
10808451041
|
from typing import Dict, Tuple
from AnalysisOfUnstructuredData.helpers.hsc.publisher import Publisher
class PublisherRelation:
titles_types: Dict[str, str]
_id: Tuple[int, int]
def __init__(self, publisher_1: Publisher, publisher_2: Publisher):
self.publisher_1 = publisher_1
self.publisher_2 = publisher_2
self.titles_types = {}
self.id = (publisher_1.id, publisher_2.id)
@property
def id(self):
return self._id
@id.setter
def id(self, set_val: Tuple[int, int]):
if set_val[0] > set_val[1]:
self._id = (set_val[1], set_val[0])
else:
self._id = set_val
def add_publication(self, publication_type: str, title: str):
self.titles_types[title] = publication_type
@property
def times_linked(self):
return len(self.titles_types)
def __eq__(self, other: 'PublisherRelation'):
return self.id == other.id
def __hash__(self):
return hash(self.id)  # self.id is a tuple; __hash__ must return an int
def html_label(self) -> str:
lab = """{} publications of {}. {} and {}. {}.
<table border="1" class="dataframe">\n\t<thead>\n\t\t<tr style="text-align: left;">\n\t\t\t<th>Type</th>
\t\t\t<th>Title</th>\n\t\t</tr>\n\t</thead>\n\t<tbody>\n""".format(
self.times_linked,
self.publisher_1.name, '-'.join(self.publisher_1.surname),
self.publisher_2.name, '-'.join(self.publisher_2.surname)
)
lab += '\n'.join(["\t\t<tr>\n\t\t\t<td>{}</td>\n\t\t\t<td>{}</td>\n\t\t</tr>".format(p_type, title)
for title, p_type in self.titles_types.items()])
lab += '\n\t</tbody>\n</table>'
return lab
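# Illustrative usage sketch (not part of the original file). It assumes two
# existing Publisher instances with an integer `id`, a string `name` and an
# iterable `surname`, which is what PublisherRelation relies on above.
#
#     rel = PublisherRelation(publisher_a, publisher_b)
#     rel.add_publication('article', 'Some Title')
#     assert rel.times_linked == 1
#     print(rel.html_label())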
|
TheDecks/Studies
|
AnalysisOfUnstructuredData/helpers/hsc/relation.py
|
relation.py
|
py
| 1,688 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Dict",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "AnalysisOfUnstructuredData.helpers.hsc.publisher.Publisher",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 22,
"usage_type": "name"
}
] |
40319717427
|
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler import \
module_dependency_error, MODULE_EXCEPTIONS
try:
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import single_post
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.defaults.main import OPN_MOD_ARGS
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.helper.system import wait_for_response
except MODULE_EXCEPTIONS:
module_dependency_error()
DOCUMENTATION = 'https://opnsense.ansibleguy.net/en/latest/modules/package.html'
EXAMPLES = 'https://opnsense.ansibleguy.net/en/latest/modules/package.html'
def run_module():
module_args = dict(
action=dict(
type='str', required=True,
choices=['poweroff', 'reboot', 'update', 'upgrade', 'audit']
),
wait=dict(type='bool', required=False, default=True),
wait_timeout=dict(type='int', required=False, default=90),
poll_interval=dict(type='int', required=False, default=2),
**OPN_MOD_ARGS
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True,
)
result = {
'changed': True,
'failed': False,
'timeout_exceeded': False,
}
if not module.check_mode:
single_post(
module=module,
cnf={
'command': module.params['action'],
'module': 'core',
'controller': 'firmware',
}
)
if module.params['action'] in ['reboot', 'upgrade'] and module.params['wait']:
if module.params['debug']:
module.warn(f"Waiting for firewall to complete '{module.params['action']}'!")
result['failed'] = not wait_for_response(module=module)
if result['failed']:
result['timeout_exceeded'] = True
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
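# Illustrative sketch (not part of the original module): when invoked from a
# playbook, the module receives parameters matching module_args above,
# e.g. (shown here as a Python dict):
#
#     {'action': 'reboot', 'wait': True, 'wait_timeout': 120, 'poll_interval': 2}
#
# With action 'reboot' or 'upgrade' and wait enabled, run_module() posts the
# command and then polls the firewall via wait_for_response() until it answers
# or wait_timeout is exceeded.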
|
ansibleguy/collection_opnsense
|
plugins/modules/system.py
|
system.py
|
py
| 2,071 |
python
|
en
|
code
| 158 |
github-code
|
6
|
[
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler.MODULE_EXCEPTIONS",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.handler.module_dependency_error",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.defaults.main.OPN_MOD_ARGS",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "ansible.module_utils.basic.AnsibleModule",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api.single_post",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.helper.system.wait_for_response",
"line_number": 55,
"usage_type": "call"
}
] |
4456513686
|
from dataclasses import dataclass
from fnmatch import fnmatch
from typing import cast, Optional
from urllib.parse import urljoin
from .config import Config
from .download import DownloaderMixIn, HTTPXDownloaderMixIn
from .exceptions import (
ArbitrarySoftwareAttack,
DownloadNotFoundError,
EndlessDataAttack,
FreezeAttack,
InconsistentTargetError,
MixAndMatchAttack,
NoConsistentSnapshotsError,
RollbackAttack,
TargetNotFoundError,
)
from .models.common import (
Comparable,
Filepath,
Hash,
Hashes,
Length,
Positive,
Rolename,
Rolenames,
Url,
Version,
)
from .models.metadata import (
Metadata,
Root,
Signed,
Snapshot,
TargetFile,
Targets,
ThresholdOfPublicKeys,
TimeSnap,
Timestamp,
)
from .readers import JSONReaderMixIn, ReaderMixIn
from .writers import WriterMixIn
@dataclass
class Target:
path: Filepath
target: TargetFile
# This is a Repository, not a Client, because I want to make it clear that you
# can compose these objects to traverse multiple Repositories.
class Repository(WriterMixIn, DownloaderMixIn, ReaderMixIn):
"""A class to abstractly handle the TUF client application workflow for a
single repository.
Do not instantiate this class."""
ROOT_ROLENAME = "root"
SNAPSHOT_ROLENAME = "snapshot"
TARGETS_ROLENAME = "targets"
TIMESTAMP_ROLENAME = "timestamp"
def __init__(self, config: Config):
super().init_downloader()
self.config = config
self.__refresh()
def close(self) -> None:
self.config.close()
super().close_downloader()
def __check_expiry(self, signed: Signed) -> None:
if signed.expires <= self.config.NOW:
raise FreezeAttack(f"{signed}: {signed.expires} <= {self.config.NOW}")
def __check_hashes(self, abspath: Filepath, expected: Hashes) -> None:
if not self.check_hashes(abspath, expected):
raise ArbitrarySoftwareAttack(f"{abspath} != {expected}")
def __check_length(self, abspath: Filepath, expected: Length) -> None:
if not self.check_length(abspath, expected):
raise EndlessDataAttack(f"{abspath} > {expected} bytes")
def __check_rollback(self, prev: Comparable, curr: Comparable) -> None:
if prev > curr:
raise RollbackAttack(f"{prev} > {curr}")
def __check_signatures(
self, role: ThresholdOfPublicKeys, metadata: Metadata
) -> None:
if not role.verified(metadata.signatures, metadata.canonical):
raise ArbitrarySoftwareAttack(f"{metadata.signed}")
def __check_version(self, signed: Signed, timesnap: TimeSnap) -> None:
if signed.version != timesnap.version:
raise MixAndMatchAttack(f"{signed.version} != {timesnap.version}")
def __local_metadata_filename(self, rolename: Rolename) -> Filepath:
return self.join_path(self.config.metadata_cache, self.role_filename(rolename))
def __local_targets_filename(self, relpath: Filepath) -> Filepath:
return self.join_path(self.config.targets_cache, relpath)
def __remote_metadata_filename(
self, rolename: Rolename, version: Version
) -> Filepath:
return f"{version.value}.{self.role_filename(rolename)}"
def __remote_metadata_path(self, relpath: Filepath) -> Url:
return urljoin(self.config.metadata_root, relpath)
def __remote_targets_path(self, relpath: Filepath, _hash: Hash) -> Url:
dirname, basename = self.split_path(relpath)
basename = f"{_hash}.{basename}"
relpath = self.join_path(dirname, basename)
return urljoin(self.config.targets_root, relpath)
def __refresh(self) -> None:
"""Refresh metadata for root, timestamp, and snapshot so that we have a
consistent snapshot of the repository."""
try:
self.__load_root()
self.__update_root()
self.__update_timestamp()
self.__update_snapshot()
except Exception:
self.close()
raise
def __load_root(self) -> None:
"""5.1. Load the trusted root metadata file."""
# NOTE: we must parse the root metadata file on disk in order to get
# the keys to verify itself in the first place.
filename = self.__local_metadata_filename(self.ROOT_ROLENAME)
metadata = self.read_from_file(filename)
# FIXME: the following line is purely to keep mypy happy; otherwise,
# it complains that the .signed.root attribute does not exist.
metadata.signed = cast(Root, metadata.signed)
# Verify self-signatures on previous root metadata file.
self.__check_signatures(metadata.signed.root, metadata)
# NOTE: the expiration of the trusted root metadata file does not
# matter, because we will attempt to update it in the next step.
# We do not support non-consistent-snapshot repositories.
if not metadata.signed.consistent_snapshot:
raise NoConsistentSnapshotsError
# Now that we have verified signatures, throw them away, and set the
# current root to the actual metadata of interest.
self.__root = metadata.signed
def __update_root(self) -> None:
"""5.2. Update the root metadata file."""
# 5.2.1. Let N denote the version number of the trusted root metadata
# file.
prev_root = self.__root
curr_root = prev_root
n = curr_root.version
# 5.2.8. Repeat steps 5.2.1 to 5.2.8.
for _ in range(self.config.MAX_ROOT_ROTATIONS):
# 5.2.2. Try downloading version N+1 of the root metadata file.
n += 1
remote_filename = self.__remote_metadata_filename(self.ROOT_ROLENAME, n)
remote_path = self.__remote_metadata_path(remote_filename)
try:
tmp_file = self.download(
remote_path, self.config.MAX_ROOT_LENGTH, self.config
)
except DownloadNotFoundError:
break
self.__check_length(tmp_file, self.config.MAX_ROOT_LENGTH)
# 5.2.3. Check for an arbitrary software attack.
metadata = self.read_from_file(tmp_file)
metadata.signed = cast(Root, metadata.signed)
self.__check_signatures(curr_root.root, metadata)
self.__check_signatures(metadata.signed.root, metadata)
# 5.2.4. Check for a rollback attack.
if metadata.signed.version != n:
raise RollbackAttack(
f"{metadata.signed.version} != {n} in {remote_path}"
)
# 5.2.5. Note that the expiration of the new (intermediate) root
# metadata file does not matter yet.
# 5.2.6. Set the trusted root metadata file to the new root metadata
# file.
curr_root = metadata.signed
# 5.2.9. Check for a freeze attack.
self.__check_expiry(curr_root)
if prev_root < curr_root:
# 5.2.11. Set whether consistent snapshots are used as per the
# trusted root metadata file.
# NOTE: We violate the spec in checking this *before* deleting local
# timestamp and/or snapshot metadata, which I think is reasonable.
if not curr_root.consistent_snapshot:
raise NoConsistentSnapshotsError
# 5.2.10. If the timestamp and / or snapshot keys have been rotated,
# then delete the trusted timestamp and snapshot metadata files.
if (
self.__root.timestamp != curr_root.timestamp
or self.__root.snapshot != curr_root.snapshot
):
filename = self.__local_metadata_filename(self.SNAPSHOT_ROLENAME)
if self.file_exists(filename):
self.rm_file(filename)
filename = self.__local_metadata_filename(self.TIMESTAMP_ROLENAME)
if self.file_exists(filename):
self.rm_file(filename)
# 5.2.7. Persist root metadata.
# NOTE: We violate the spec in persisting only *after* checking
# everything, which I think is reasonable.
self.mv_file(tmp_file, self.__local_metadata_filename(self.ROOT_ROLENAME))
self.__root = curr_root
def __get_prev_metadata(self, rolename: Rolename) -> Optional[Metadata]:
filename = self.__local_metadata_filename(rolename)
if self.file_exists(filename):
return self.read_from_file(filename)
return None
def __update_timestamp(self) -> None:
"""5.3. Download the timestamp metadata file."""
role_filename = self.role_filename(self.TIMESTAMP_ROLENAME)
remote_path = self.__remote_metadata_path(role_filename)
tmp_file = self.download(
remote_path, self.config.MAX_TIMESTAMP_LENGTH, self.config
)
self.__check_length(tmp_file, self.config.MAX_TIMESTAMP_LENGTH)
# 5.3.1. Check for an arbitrary software attack.
curr_metadata = self.read_from_file(tmp_file)
curr_metadata.signed = cast(Timestamp, curr_metadata.signed)
self.__check_signatures(self.__root.timestamp, curr_metadata)
# 5.3.2. Check for a rollback attack.
prev_metadata = self.__get_prev_metadata(self.TIMESTAMP_ROLENAME)
if prev_metadata:
prev_metadata.signed = cast(Timestamp, prev_metadata.signed)
self.__check_rollback(prev_metadata.signed, curr_metadata.signed)
self.__check_rollback(
prev_metadata.signed.snapshot, curr_metadata.signed.snapshot
)
# 5.3.3. Check for a freeze attack.
self.__check_expiry(curr_metadata.signed)
# 5.3.4. Persist timestamp metadata.
self.mv_file(tmp_file, self.__local_metadata_filename(self.TIMESTAMP_ROLENAME))
self.__timestamp = curr_metadata.signed
def __update_snapshot(self) -> None:
"""5.4. Download snapshot metadata file."""
prev_metadata = self.__get_prev_metadata(self.SNAPSHOT_ROLENAME)
obsolete = (
not prev_metadata
or prev_metadata.signed.version < self.__timestamp.snapshot.version
)
local_filename = self.__local_metadata_filename(self.SNAPSHOT_ROLENAME)
length = self.__timestamp.snapshot.length or self.config.MAX_SNAPSHOT_LENGTH
# Download metadata only if not cached or if it is obsolete.
if not obsolete:
tmp_file = local_filename
else:
remote_filename = self.__remote_metadata_filename(
self.SNAPSHOT_ROLENAME, self.__timestamp.snapshot.version
)
remote_path = self.__remote_metadata_path(remote_filename)
tmp_file = self.download(remote_path, length, self.config)
self.__check_length(tmp_file, length)
# 5.4.1. Check against timestamp role's snapshot hash.
if self.__timestamp.snapshot.hashes:
self.__check_hashes(tmp_file, self.__timestamp.snapshot.hashes)
# 5.4.2. Check for an arbitrary software attack.
curr_metadata = self.read_from_file(tmp_file)
curr_metadata.signed = cast(Snapshot, curr_metadata.signed)
self.__check_signatures(self.__root.snapshot, curr_metadata)
# 5.4.3. Check against timestamp role's snapshot version.
self.__check_version(curr_metadata.signed, self.__timestamp.snapshot)
# 5.4.4. Check for a rollback attack.
if prev_metadata:
prev_metadata.signed = cast(Snapshot, prev_metadata.signed)
for filename, prev_timesnap in prev_metadata.signed.targets.items():
curr_timesnap = curr_metadata.signed.targets.get(filename)
if not curr_timesnap:
raise RollbackAttack(
f"{filename} was in {prev_metadata.signed.version} but missing in {curr_metadata.signed.version}"
)
self.__check_rollback(prev_timesnap, curr_timesnap)
# 5.4.5. Check for a freeze attack.
self.__check_expiry(curr_metadata.signed)
# 5.4.6. Persist snapshot metadata.
if obsolete:
self.mv_file(tmp_file, local_filename)
self.__snapshot = curr_metadata.signed
def __preorder_dfs(
self,
targets: Targets,
target_relpath: Filepath,
visited: Rolenames,
counter: Positive,
) -> Optional[TargetFile]:
target_file = targets.targets.get(target_relpath)
if target_file:
return target_file
else:
for rolename, delegation in targets.delegations.items():
if rolename not in visited:
for path in delegation.paths:
if fnmatch(target_relpath, path):
target_file = self.__update_targets(
visited,
counter + 1,
rolename,
delegation.role,
target_relpath,
)
if target_file or delegation.terminating:
return target_file
return None
def __update_targets(
self,
visited: Rolenames,
counter: Positive,
rolename: Rolename,
role: ThresholdOfPublicKeys,
target_relpath: Filepath,
) -> Optional[TargetFile]:
"""5.5. Download the top-level targets metadata file."""
if rolename in visited or counter > self.config.MAX_PREORDER_DFS_VISITS:
return None
visited.add(rolename)
role_filename = self.role_filename(rolename)
timesnap = self.__snapshot.targets.get(role_filename)
if not timesnap:
raise MixAndMatchAttack(f"{rolename} not in {self.__snapshot}")
prev_metadata = self.__get_prev_metadata(rolename)
obsolete = not prev_metadata or prev_metadata.signed.version < timesnap.version
local_filename = self.__local_metadata_filename(rolename)
length = timesnap.length or self.config.MAX_TARGETS_LENGTH
# Download metadata only if not cached or if it is obsolete.
if not obsolete:
tmp_file = local_filename
else:
remote_filename = self.__remote_metadata_filename(
rolename, timesnap.version
)
remote_path = self.__remote_metadata_path(remote_filename)
tmp_file = self.download(remote_path, length, self.config)
self.__check_length(tmp_file, length)
# 5.5.1. Check against snapshot role's targets hash.
if timesnap.hashes:
self.__check_hashes(tmp_file, timesnap.hashes)
# 5.5.2. Check for an arbitrary software attack.
curr_metadata = self.read_from_file(tmp_file)
curr_metadata.signed = cast(Targets, curr_metadata.signed)
self.__check_signatures(role, curr_metadata)
# 5.5.3. Check against snapshot role's targets version.
self.__check_version(curr_metadata.signed, timesnap)
# 5.5.4. Check for a freeze attack.
self.__check_expiry(curr_metadata.signed)
# 5.5.5. Persist targets metadata.
if obsolete:
self.mv_file(tmp_file, local_filename)
# 5.5.6. Perform a pre-order depth-first search for metadata about the
# desired target, beginning with the top-level targets role.
return self.__preorder_dfs(
curr_metadata.signed, target_relpath, visited, counter
)
def __get_target(self, target_file: TargetFile, relpath: Filepath) -> Filepath:
for _hash in target_file.hashes.values():
remote_path = self.__remote_targets_path(relpath, _hash)
try:
return self.download(remote_path, target_file.length, self.config)
except DownloadNotFoundError:
continue
raise InconsistentTargetError(f"{relpath}")
# FIXME: consider using a context manager for cleanup.
def get(self, relpath: Filepath) -> Target:
"""Use this function to securely download and verify an update."""
try:
# 5.6. Verify the desired target against its targets metadata.
target_file = self.__update_targets(
set(), Positive(1), self.TARGETS_ROLENAME, self.__root.targets, relpath
)
# 5.6.2. Otherwise (if there is targets metadata about this target),
# download the target, and verify that its hashes match the targets
# metadata.
if target_file:
local_path = self.__local_targets_filename(relpath)
file_exists = self.file_exists(local_path)
# Download target only if not cached.
if file_exists:
tmp_file = local_path
else:
tmp_file = self.__get_target(target_file, relpath)
self.__check_length(tmp_file, target_file.length)
self.__check_hashes(tmp_file, target_file.hashes)
if not file_exists:
self.mv_file(tmp_file, local_path)
return Target(local_path, target_file)
except Exception as e:
self.close()
raise TargetNotFoundError(f"{relpath}") from e
else:
# 5.6.1. If there is no targets metadata about this target, abort
# the update cycle and report that there is no such target.
self.close()
raise TargetNotFoundError(f"{relpath}")
class JSONRepository(Repository, HTTPXDownloaderMixIn, JSONReaderMixIn):
"""Instantiate this class to read canonical JSON TUF metadata from a
remote repository."""
pass
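# Illustrative usage sketch (not part of the original module). The Config
# constructor arguments are elided because they are defined elsewhere in this
# package; the calls below use only the Repository API defined above.
#
#     repo = JSONRepository(config)                      # config: a populated Config object
#     target = repo.get("example/package-1.0.tar.gz")    # hypothetical target path
#     print(target.path)                                 # local path of the verified file
#
# get() raises TargetNotFoundError when no targets metadata describes the
# requested path.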
|
trishankatdatadog/tuf-on-a-plane
|
src/tuf_on_a_plane/repository.py
|
repository.py
|
py
| 18,018 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "models.common.Filepath",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "models.metadata.TargetFile",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "writers.WriterMixIn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "download.DownloaderMixIn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "readers.ReaderMixIn",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "config.Config",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "models.metadata.Signed",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "exceptions.FreezeAttack",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "models.common.Filepath",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "models.common.Hashes",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "exceptions.ArbitrarySoftwareAttack",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "models.common.Filepath",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "models.common.Length",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "exceptions.EndlessDataAttack",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.common.Comparable",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "exceptions.RollbackAttack",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "models.metadata.ThresholdOfPublicKeys",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "models.metadata.Metadata",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "exceptions.ArbitrarySoftwareAttack",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "models.metadata.Signed",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "models.metadata.TimeSnap",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "exceptions.MixAndMatchAttack",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "models.common.Rolename",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "models.common.Rolename",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "models.common.Version",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urljoin",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "models.common.Url",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "models.common.Hash",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urljoin",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "models.common.Url",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "models.metadata.Root",
"line_number": 141,
"usage_type": "argument"
},
{
"api_name": "exceptions.NoConsistentSnapshotsError",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "exceptions.DownloadNotFoundError",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "models.metadata.Root",
"line_number": 181,
"usage_type": "argument"
},
{
"api_name": "exceptions.RollbackAttack",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "exceptions.NoConsistentSnapshotsError",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "models.common.Rolename",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "models.metadata.Metadata",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "models.metadata.Timestamp",
"line_number": 246,
"usage_type": "argument"
},
{
"api_name": "typing.cast",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "models.metadata.Timestamp",
"line_number": 252,
"usage_type": "argument"
},
{
"api_name": "typing.cast",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "models.metadata.Snapshot",
"line_number": 293,
"usage_type": "argument"
},
{
"api_name": "typing.cast",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "models.metadata.Snapshot",
"line_number": 301,
"usage_type": "argument"
},
{
"api_name": "exceptions.RollbackAttack",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "models.metadata.Targets",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 322,
"usage_type": "name"
},
{
"api_name": "models.common.Rolenames",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "models.common.Positive",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "fnmatch.fnmatch",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "models.metadata.TargetFile",
"line_number": 325,
"usage_type": "name"
},
{
"api_name": "models.common.Rolenames",
"line_number": 347,
"usage_type": "name"
},
{
"api_name": "models.common.Positive",
"line_number": 348,
"usage_type": "name"
},
{
"api_name": "models.common.Rolename",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "models.metadata.ThresholdOfPublicKeys",
"line_number": 350,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "exceptions.MixAndMatchAttack",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "typing.cast",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "models.metadata.Targets",
"line_number": 386,
"usage_type": "argument"
},
{
"api_name": "typing.Optional",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "models.metadata.TargetFile",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "models.metadata.TargetFile",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "models.common.Filepath",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "exceptions.DownloadNotFoundError",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "exceptions.InconsistentTargetError",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "models.common.Filepath",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "models.common.Positive",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "exceptions.TargetNotFoundError",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "exceptions.TargetNotFoundError",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "download.HTTPXDownloaderMixIn",
"line_number": 454,
"usage_type": "name"
},
{
"api_name": "readers.JSONReaderMixIn",
"line_number": 454,
"usage_type": "name"
}
] |
11322722554
|
from keras.models import *
from keras.layers import *
from model.model_basic import BasicDeepModel
from keras.utils.vis_utils import plot_model
from keras import regularizers
dp = 7
filter_nr = 64
filter_size = 3
max_pool_size = 3
max_pool_strides = 2
dense_nr = 256
spatial_dropout = 0.2
dense_dropout = 0.5
conv_kern_reg = regularizers.l2(0.00001)
conv_bias_reg = regularizers.l2(0.00001)
class DpcnnModel(BasicDeepModel):
def __init__(self, name='basicModel', num_flods=5, config=None):
name = 'dpcnn' + config.main_feature
BasicDeepModel.__init__(self, name=name, n_folds=num_flods, config=config)
def create_model(self):
char_embedding = Embedding(self.max_c_features, self.char_embed_size, weights=[self.char_embedding], trainable=True, name='char_embedding')
word_embedding = Embedding(self.max_w_features, self.word_embed_size, weights=[self.word_embedding], trainable=True, name='word_embedding')
char_input = Input(shape=(self.char_max_len,), name='char')
word_input = Input(shape=(self.word_max_len,), name='word')
if not self.config.main_feature == 'char':
char_input, word_input = word_input, char_input
char_embedding, word_embedding = word_embedding, char_embedding
self.char_max_len, self.word_max_len = self.word_max_len, self.char_max_len
x = char_embedding(char_input)
x = BatchNormalization()(x)
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(x)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(block1)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
# we pass embedded comment through conv1d with filter size 1 because it needs to have the same shape as block output
# if you choose filter_nr = embed_size (300 in this case) you don't have to do this part and can add emb_comment directly to block1_output
resize_emb = Conv1D(filter_nr, kernel_size=1, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(x)
resize_emb = PReLU()(resize_emb)
block1_output = add([block1, resize_emb])
x = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block1_output)
for i in range(dp):
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(x)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block1 = Conv1D(filter_nr, kernel_size=filter_size, padding='same', activation='linear', kernel_regularizer=conv_kern_reg, bias_regularizer=conv_bias_reg)(block1)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block_output = add([block1, x])
print(i)
if i + 1 != dp:
x = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block_output)
x = GlobalMaxPooling1D()(block_output)
output = Dense(dense_nr, activation='linear')(x)
output = BatchNormalization()(output)
output = PReLU()(output)
if self.config.main_feature == 'all':
recurrent_units = 60
word_embedding = Embedding(self.max_w_features, self.word_embed_size, weights=[self.word_embedding], trainable=False, name='word_embedding')
word_input = Input(shape=(self.word_max_len,), name='word')
word_embedding_layer = word_embedding(word_input)
word_embedding_layer = SpatialDropout1D(0.5)(word_embedding_layer)
word_rnn_1 = Bidirectional(CuDNNGRU(recurrent_units // 2, return_sequences=True))(word_embedding_layer)
word_rnn_1 = SpatialDropout1D(0.5)(word_rnn_1)
word_rnn_2 = Bidirectional(CuDNNGRU(recurrent_units // 2, return_sequences=True))(word_rnn_1)
word_maxpool = GlobalMaxPooling1D()(word_rnn_2)
word_average = GlobalAveragePooling1D()(word_rnn_2)
output = concatenate([output, word_maxpool, word_average], axis=-1)
output = Dropout(dense_dropout)(output)
dense2 = Dense(self.n_class, activation="softmax")(output)
res_model = Model(inputs=[char_input, word_input], outputs=dense2)
else:
output = Dropout(dense_dropout)(output)
# dense2 = Dense(self.n_class, activation="softmax", kernel_regularizer=regularizers.l2(self.wd))(output)
dense2 = Dense(self.n_class, activation="softmax")(output)
res_model = Model(inputs=[char_input], outputs=dense2)
plot_model(res_model, to_file="{}.png".format(self.name), show_shapes=True)
return res_model
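# Illustrative usage sketch (not part of the original file). It assumes a config
# object carrying the attributes referenced above (main_feature, the char/word
# embedding matrices and max lengths, n_class, ...), as built elsewhere in this
# project; `num_flods` is spelled as in the constructor above.
#
#     wrapper = DpcnnModel(num_flods=5, config=config)
#     keras_model = wrapper.create_model()
#     keras_model.summary()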
|
nlpjoe/daguan-classify-2018
|
src/model/dpcnn_model.py
|
dpcnn_model.py
|
py
| 5,042 |
python
|
en
|
code
| 154 |
github-code
|
6
|
[
{
"api_name": "keras.regularizers.l2",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "keras.regularizers.l2",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keras.regularizers",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "model.model_basic.BasicDeepModel",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "model.model_basic.BasicDeepModel.__init__",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "model.model_basic.BasicDeepModel",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "keras.utils.vis_utils.plot_model",
"line_number": 95,
"usage_type": "call"
}
] |
22906645455
|
import xlrd
class XLDateInfo(object):
def __init__(self, path=''):
self.xl = xlrd.open_workbook(path)
self.sheet = None
def get_sheet_info_by_name(self, name):
self.sheet = self.xl.sheet_by_name(name)
return self.get_sheet_info()
def get_sheet_info(self):
infolist = []
for row in range(0, self.sheet.nrows):
info = self.sheet.row_values(row)
infolist.append(info)
return infolist
if __name__ == "__main__":
data_info = XLDateInfo(r'..\test_data\get_params_headers_data.xlsx')
all_data = data_info.get_sheet_info_by_name('TestData')
print(all_data)
|
weijianhui011/uploadfile
|
public/read_excel.py
|
read_excel.py
|
py
| 659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "xlrd.open_workbook",
"line_number": 6,
"usage_type": "call"
}
] |
30980414030
|
from matplotlib.pyplot import draw
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
# set screen resolution
resolution = (725,725)
# open a screen of above resolution
screen = pygame.display.set_mode(resolution)
# storing screen variable values
width = screen.get_width()
height = screen.get_height()
# text_on_screen() not affected by catPath variable
# main_menu() not affected by catPath variable
# tableGen() not affected by catPath variable
# get appropriate cat path to display in outcome screen
def getCatPath(state):
if state == 0:
catPath = "assets/wincat.png"
elif state == 1:
catPath = "assets/losecat.png"
elif state == 2:
catPath = "assets/draw.png"
return catPath
# updateScore() not affected by catPath variable
# displayScore() not affected by catPath variable
# gameTime() not affected by catPath variable
# checkWinner() not affected by catPath variable
# getDice() not affected by catPath variable
# showDice() not affected by catPath variable
# gameLogic() not affected by catPath variable
# user won
def winScreen(die1, die2, num, score):
status = num
# get path to cat image based on win/lose/draw state passed
catPath = getCatPath(status)
cat = pygame.transform.scale(pygame.image.load(catPath).convert_alpha(), (400, 450))
#display cat
screen.blit(cat, ((width/4),(height/2)-325))
# screen for when user loses
def loseScreen(die1, die2, num, score):
status = num
# get path to cat image based on win/lose/draw state passed
catPath = getCatPath(status)
cat = pygame.transform.scale(pygame.image.load(catPath).convert_alpha(), (400, 450))
#display cat
screen.blit(cat, ((width/4),(height/2)-325))
# screen for when computer and user dice are equal
def drawScreen(die1, num, score):
status = num
# get appropriate cat based on game status (win/loss/draw)
catPath = getCatPath(status)
cat = pygame.transform.scale(pygame.image.load(catPath).convert_alpha(), (400, 450))
# Display cat on screeen
screen.blit(cat, ((width/4),(height/2)-325))
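# Illustrative sketch (not part of the original file): getCatPath maps the game
# state to an image path, e.g. getCatPath(0) -> "assets/wincat.png",
# getCatPath(1) -> "assets/losecat.png", getCatPath(2) -> "assets/draw.png";
# any other value would leave catPath unassigned and raise an UnboundLocalError.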
|
jessica-leishman/high-rollers
|
analysis_static/manual slices/hrStatic4.py
|
hrStatic4.py
|
py
| 2,135 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 79,
"usage_type": "attribute"
}
] |
27959320759
|
import os
from os import getenv
from dotenv import load_dotenv
load_dotenv()
# FOR CODES
API_ID = int(getenv("API_ID","2302493"))
API_HASH = getenv("API_HASH","1bf8344851a88633343fde339f2eee20")
SUDO_USERS = list(map(int, getenv("SUDO_USERS", "5366284852").split()))
LOGGER = int(getenv("LOGGER","-1001804302628"))
OWNER = int(getenv("OWNER_ID","5366284852"))
NAME = getenv("ALIVE_NAME","ANIRUDH OP")
OWN_USERNAME= getenv("OWN_USERNAME","@KATTAR_HINDU_OP")
ALIVE_PIC = getenv("ALIVE_PIC","https://te.legra.ph/file/a66beb72764269e744911.jpg")
# FOR SPAMBOT
TOKEN1 = getenv("TOKEN1", None)
TOKEN2 = getenv("TOKEN2", None)
TOKEN3 = getenv("TOKEN3", None)
TOKEN4 = getenv("TOKEN4", None)
TOKEN5 = getenv("TOKEN5", None)
TOKEN6 = getenv("TOKEN6", None)
TOKEN7 = getenv("TOKEN7", None)
TOKEN8 = getenv("TOKEN8", None)
TOKEN9 = getenv("TOKEN9", None)
TOKEN10 = getenv("TOKEN10", None)
|
Anirudh1212121/DcSpamBot
|
config.py
|
config.py
|
py
| 899 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 30,
"usage_type": "call"
}
] |
31350011958
|
import unittest
from seleniumwire import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
#This code uses the google chrome browser to conduct a single test on the python.org website
#It uses the website search bar to search for "pycon"
# Selenium installation: 3 marks."pip install selenium"
# Conducting API testing: 5 marks."Created unittest for API Testing"
# Sharing the right code and configurations used for API testing: 5 marks."Test passed so code is right"
class PythonOrgSearch(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(ChromeDriverManager().install())
def test_search_in_python_org(self):
driver = self.driver
driver.get("http://www.python.org")
self.assertIn("Python", driver.title)
elem = driver.find_element(By.NAME, "q")
elem.send_keys("pycon")
elem.submit()
self.assertNotIn("No results found.", driver.page_source)
for i in driver.requests:
print("/n %s",i)
print(i.response)
a=a+1
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
Kitchlew/Summer
|
SQA/sel.py
|
sel.py
|
py
| 1,254 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "seleniumwire.webdriver.Chrome",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "seleniumwire.webdriver",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.NAME",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "unittest.main",
"line_number": 36,
"usage_type": "call"
}
] |
16407449430
|
import mysql.connector
import csv
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import colors as mcolors
import datetime
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
for name, color in colors.items())
colors = [name for hsv, name in by_hsv]
hostname = 'localhost'
username = '' # no privileges set so this works
password = ''
database = 'msr14'
conn = mysql.connector.connect(host=hostname, user=username, passwd=password, db=database)
# Simple routine to run a query on a database and print the results:
def queryDB(conn, query) :
cur = conn.cursor()
cur.execute(query)
return cur.fetchall()
## PR COMMENTS
project_ids = queryDB(conn, "select projects.id as id, pull_requests.pullreq_id as pullred_id from (select id from projects where forked_from is null) as projects join pull_requests on projects.id = pull_requests.base_repo_id;")[:-1]
projects_to_pr_comments = {}
sid = SentimentIntensityAnalyzer()
for ID, PR_ID in project_ids:
comments = "select created_at, body from pull_request_comments where pull_request_id = {} ORDER BY created_at;".format(PR_ID)
rows = queryDB(conn, comments)
dates = []
scores = []
for date, comment in rows:
ss = sid.polarity_scores(comment)
dates.append(date)
scores.append(ss['compound'])
""" if ss['compound'] < -0.75 or ss['compound'] > 0.75:
print comment
print ss"""
if len(dates) > 0:
if ID in projects_to_pr_comments:
current = projects_to_pr_comments[ID]
current[0].extend(dates)
current[1].extend(scores)
else:
projects_to_pr_comments[ID] = [dates,scores]
## COMMITS
project_ids = queryDB(conn, "select id from projects where forked_from is null;")[:-1]
projects_to_commits = {}
for ID in [x[0] for x in project_ids]:
commits = "select project_id, created_at from commits where project_id = {} ORDER BY created_at;".format(ID)
rows = queryDB(conn, commits)
rows = [r[1] for r in rows] #.strftime('%m/%d/%Y')
projects_to_commits[ID] = rows
conn.close()
## PLOT
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
j = 0
for ID,date_scores in projects_to_pr_comments.items():
fig = plt.figure(figsize=(9, 9))
ax = plt.subplot(111)
plt.title("Total Commits and Sentiment Analysis for Pull Requests Over Time For Repository {} on Github".format(ID), fontsize=10)
#plt.ylabel("Average Sentiment", fontsize=12)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
indices = [i[0] for i in sorted(enumerate(date_scores[0]), key=lambda x:x[1])]
sorted_dates = []
sorted_scores = []
current_sum = 0.0
count = 0
for i in indices:
sorted_dates.append(date_scores[0][i])
current_sum += date_scores[1][i]
count += 1.0
sorted_scores.append(current_sum)
sorted_scores = [i / current_sum for i in sorted_scores]
plt.plot_date(sorted_dates,sorted_scores, '-b', label="Total Sentiment")
datetime_p = projects_to_commits[ID]
datetime = []
for d in datetime_p:
if d >= date_scores[0][0]:
datetime.append(d)  # append the single commit date, not the whole list
normalized = [i / float(len(datetime)) for i in range(1, len(datetime) + 1)]
plt.plot_date(datetime,normalized, '-g', label="Total Commits (Normalized)")
ax.legend(loc=0)
fig.savefig('plots/total_commits_and_total_sentiment_date_normalized/{}_total_commits_and_total_sentiment.png'.format(ID))
plt.close(fig)
#plt.show()
|
natashaarmbrust/github-sentiment
|
pr_sentiments.py
|
pr_sentiments.py
|
py
| 3,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.colors.BASE_COLORS",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.CSS4_COLORS",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors.rgb_to_hsv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.to_rgba",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "nltk.sentiment.vader.SentimentIntensityAnalyzer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot_date",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "datetime.append",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot_date",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
}
] |
10136905988
|
import cv2 as cv
import copy
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread("/home/arkaprabha/CViiing/photos/cameron.jpeg")
cv.imshow("image",img)
def reframe(frame=None,scale=0.75):
width= int(frame.shape[1] + scale)
height = int(frame.shape[0] + scale)
dimen = (width,height)
return cv.resize(frame,dimen,cv.INTER_AREA)
def drawshapes(frame,n1,m1,n2,m2):
cv.rectangle(frame,(n1,m1),(n2,m2),(0,255,0),thickness=2)
cv.circle(frame,(n2//2,m2 + m2//2),40,(0,0,255),thickness = 3)
cv.putText(frame,"Didn't meant to put the circle there :)",(0,m2+10),cv.FONT_HERSHEY_TRIPLEX,0.25,(255,255,255),1)
cv.imshow("Bound",frame)
def translate(img, x,y):
# -x -> Left
# -y -> Up
# x -> Right
# y -> Down
transMat = np.float32([[1,0,x],[0,1,y]])
dimensions = (img.shape[1],img.shape[0])
return cv.warpAffine(img, transMat, dimensions)
def rotate(img,angle,rotPoint = None,scale=1.0):
(height,width)= img.shape[:2]
if (rotPoint is None):
rotPoint= (width//2,height//2)
rotmat = cv.getRotationMatrix2D(rotPoint, angle,scale) #third factor is scale here we pass 1.0 as we dont need to scale for rotation
dimens = (width,height)
return cv.warpAffine(img,rotmat,dimens)
#basically shows our hot Dr.Cameron magnified/zoomed 50 times
im2 = reframe(img,500)
cv.imshow("image2",im2)
#draw shapes on the image
x = copy.copy(img)
drawshapes(x,10,10,img.shape[0]//2,img.shape[1]//2)
#grayscale
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow("Gray",gray)
# blur
# gaussian blurr
x = copy.copy(im2)
blurrr = cv.GaussianBlur(x,(41,41),cv.BORDER_DEFAULT)
cv.imshow("Blur",blurrr)
# median blurr
mblur = cv.medianBlur(x,3)
cv.imshow("Median blur",mblur)
# bilateral
bilateral = cv.bilateralFilter(x, 5 ,15,15)
cv.imshow("Bilateral",bilateral)
#edge cascade
canny = cv.Canny(x,90,100)
cv.imshow("canny",canny)
#dilating the image
dilated = cv.dilate(x,(40,40),iterations=5)
cv.imshow("Dilated",dilated)
#eroding the image
eroded = cv.erode(x,(10,10),iterations = 5)
cv.imshow("eroded",eroded)
#translate
translated = translate(x,-100,10)
cv.imshow("Translated",translated)
#rotate
rotated = rotate(x,45)
cv.imshow("Rotated",rotated)
#resize
resized = cv.resize(x,(1000,1000), interpolation = cv.INTER_CUBIC)
cv.imshow("Resized", resized)
#flipping
flip = cv.flip(x,0)
cv.imshow('Flipped',flip)
#thresholding
gray2 = cv.cvtColor(im2,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(gray2,100,125,cv.THRESH_BINARY)
cv.imshow("Thresholded",thresh) #produces a really hot image
print(ret)
#contours
#RETR_LIST = list of all contours
#RETR_EXTERNAL = list of all external contours
#RETR_TREE = list of all hierarchical contours
#Contour approximations
#CHAIN_APPROX_NONE = No approximations
#CHAIN_APPROX_SIMPLE = Compresses all contours into simpler ones (layman explanation)
contours, heirarchies = cv.findContours(thresh,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contours found')
blank = np.zeros(x.shape[:2],dtype='uint8')
cv.drawContours(blank,contours,-1,(255,0,0),1)
cv.imshow("Contours",blank)
#color spaces
hsv = cv.cvtColor(x, cv.COLOR_BGR2HSV)
cv.imshow("HSV",hsv)
#rgb
rgb = cv.cvtColor(x, cv.COLOR_BGR2RGB)
cv.imshow("RGB",rgb)
#lab
lab = cv.cvtColor(x, cv.COLOR_BGR2LAB)
cv.imshow("LAB",lab)
#color_channels
b,g,r = cv.split(im2)
blank = np.zeros(im2.shape[:2],dtype='uint8')
blue = cv.merge([b,blank,blank])
green = cv.merge([blank,g,blank])
red = cv.merge([blank,blank,r])
cv.imshow("Blue",blue)
cv.imshow("Green",green)
cv.imshow("red",red)
#edge detection
#laplacian
lap = cv.Laplacian(gray, cv.CV_64F)
lap = np.uint8(np.absolute(lap))
cv.imshow("Laplacian edge",lap)
#sobel
sobelx = cv.Sobel(gray, cv.CV_64F,1,0)
sobely = cv.Sobel(gray, cv.CV_64F,0,1)
cv.imshow("Sobelx gray",sobelx)
cv.imshow("Sobely gray",sobely)
#histogram
#grayscale histogram
gray_hist = cv.calcHist([gray],[0],None,[256],[0,256])
plt.plot(gray_hist)
plt.title("HISTORGRAM")
#color histogram
colors = ('b','g','r')
for i,col in enumerate(colors):
hist = cv.calcHist([img],[i],None,[256],[0,256])
plt.plot(hist , color = col)
plt.show()
cv.waitKey(0)
|
ArkaprabhaChakraborty/CViiing
|
python/basics.py
|
basics.py
|
py
| 4,193 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_TRIPLEX",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.getRotationMatrix2D",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.BORDER_DEFAULT",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.medianBlur",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cv2.bilateralFilter",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.erode",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_LIST",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "cv2.Laplacian",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.absolute",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "cv2.Sobel",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "cv2.Sobel",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "cv2.calcHist",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "cv2.calcHist",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "cv2.waitKey",
"line_number": 170,
"usage_type": "call"
}
] |
36615991000
|
import asyncio
import logging
import random
import sys
from time import sleep
def run_async():
async def mytask(tid: str):
n = random.randint(1, 3)
for i in range(n):
logging.info(f"task {tid} {i} of {n}")
await asyncio.sleep(1)
logging.info(f"finished {tid} {n}")
async def async_mytasks():
ids = [f"t-{i}" for i in range(100)]
ts = [asyncio.create_task(mytask(tid)) for tid in ids]
await asyncio.wait(ts)
logging.info("all finished")
asyncio.run(async_mytasks())
def call_worker():
import requests
logging.info("call worker")
rs = f"http://172.17.0.2:8080/1"
logging.info(f"sending {rs}")
r = requests.get(rs)
r1 = r.json()["result"]
logging.info(f"result for {rs}: {r.reason} {r1}")
def run_concurrent():
from concurrent.futures import ThreadPoolExecutor
def square(n):
print(f"Started square({n})")
st = 0.5 + random.random() * 2
sleep(st)
print(f"Almost finished square({n})")
return n * n
values = range(20)
with ThreadPoolExecutor(max_workers=15) as executor:
results = executor.map(square, values)
print(list(results))
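# Note (added comment): executor.map yields results in the order of `values`,
# not in completion order, so this always prints [0, 1, 4, ..., 361].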
def main():
# run_async()
# call_worker()
run_concurrent()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level='INFO')
main()
|
wwagner4/pymultiworker
|
tryout.py
|
tryout.py
|
py
| 1,397 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "asyncio.create_task",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "asyncio.wait",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 60,
"usage_type": "attribute"
}
] |
9887556442
|
import cv2, sys, time
def start_split(filename):
start = time.time()
video = cv2.VideoCapture(filename)
if not video.isOpened():  # VideoCapture objects are always truthy; isOpened() reports whether the file could be read
print("Unable to read the video file")
sys.exit(1)
count = 0
while video.isOpened():
print("\r正在处理第{0}帧图像".format(count), end="")
ret, frame = video.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
resize = cv2.resize(gray, (8, 8))
cv2.imwrite("./pic/" + str(count).zfill(5) + ".jpg", resize)
count += 1
video.release()
end = time.time()
print("\n处理完成,处理了{0}帧图像,用时{1}秒".format(count, round(end - start, 3)))
return
if __name__ == "__main__":
file = "ba10s.mp4"
start_split(file)
|
Temperature6/BadAppleVideoProcess
|
VideoSplit.py
|
VideoSplit.py
|
py
| 825 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 25,
"usage_type": "call"
}
] |
27322987878
|
from __future__ import print_function, division, absolute_import, unicode_literals
import logging
import os
from inspect import isclass
from tempfile import NamedTemporaryFile
from collections import OrderedDict
from fontTools.misc.py23 import tobytes, tounicode, UnicodeIO
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.builder import addOpenTypeFeaturesFromString
from fontTools.feaLib.error import IncludedFeaNotFound, FeatureLibError
from fontTools import mtiLib
from ufo2ft.constants import MTI_FEATURES_PREFIX
from ufo2ft.featureWriters import (
KernFeatureWriter,
MarkFeatureWriter,
loadFeatureWriters,
ast,
)
logger = logging.getLogger(__name__)
def parseLayoutFeatures(font):
""" Parse OpenType layout features in the UFO and return a
feaLib.ast.FeatureFile instance.
"""
featxt = tounicode(font.features.text or "", "utf-8")
if not featxt:
return ast.FeatureFile()
buf = UnicodeIO(featxt)
ufoPath = font.path
includeDir = None
if ufoPath is not None:
# The UFO v3 specification says "Any include() statements must be relative to
# the UFO path, not to the features.fea file itself". We set the `name`
# attribute on the buffer to the actual feature file path, which feaLib will
# pick up and use to attribute errors to the correct file, and explicitly set
# the include directory to the parent of the UFO.
ufoPath = os.path.normpath(ufoPath)
buf.name = os.path.join(ufoPath, "features.fea")
includeDir = os.path.dirname(ufoPath)
glyphNames = set(font.keys())
try:
parser = Parser(buf, glyphNames, includeDir=includeDir)
doc = parser.parse()
except IncludedFeaNotFound as e:
if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])):
logger.warning(
"Please change the file name in the include(...); "
"statement to be relative to the UFO itself, "
"instead of relative to the 'features.fea' file "
"contained in it."
)
raise
return doc
class BaseFeatureCompiler(object):
"""Base class for generating OpenType features and compiling OpenType
layout tables from these.
"""
def __init__(self, ufo, ttFont=None, glyphSet=None, **kwargs):
"""
Args:
ufo: an object representing a UFO (defcon.Font or equivalent)
containing the features source data.
ttFont: a fontTools TTFont object where the generated OpenType
tables are added. If None, an empty TTFont is used, with
the same glyph order as the ufo object.
glyphSet: a (optional) dict containing pre-processed copies of
the UFO glyphs.
"""
self.ufo = ufo
if ttFont is None:
from fontTools.ttLib import TTFont
from ufo2ft.util import makeOfficialGlyphOrder
ttFont = TTFont()
ttFont.setGlyphOrder(makeOfficialGlyphOrder(ufo))
self.ttFont = ttFont
glyphOrder = ttFont.getGlyphOrder()
if glyphSet is not None:
assert set(glyphOrder) == set(glyphSet.keys())
else:
glyphSet = ufo
self.glyphSet = OrderedDict((gn, glyphSet[gn]) for gn in glyphOrder)
def setupFeatures(self):
""" Make the features source.
**This should not be called externally.** Subclasses
must override this method.
"""
raise NotImplementedError
def buildTables(self):
""" Compile OpenType feature tables from the source.
**This should not be called externally.** Subclasses
must override this method.
"""
raise NotImplementedError
def setupFile_features(self):
""" DEPRECATED. Use 'setupFeatures' instead. """
_deprecateMethod("setupFile_features", "setupFeatures")
self.setupFeatures()
def setupFile_featureTables(self):
""" DEPRECATED. Use 'setupFeatures' instead. """
_deprecateMethod("setupFile_featureTables", "buildTables")
self.buildTables()
def compile(self):
if "setupFile_features" in self.__class__.__dict__:
_deprecateMethod("setupFile_features", "setupFeatures")
self.setupFile_features()
else:
self.setupFeatures()
if "setupFile_featureTables" in self.__class__.__dict__:
_deprecateMethod("setupFile_featureTables", "buildTables")
self.setupFile_featureTables()
else:
self.buildTables()
return self.ttFont
def _deprecateMethod(arg, repl):
import warnings
warnings.warn(
"%r method is deprecated; use %r instead" % (arg, repl),
category=UserWarning,
stacklevel=3,
)
class FeatureCompiler(BaseFeatureCompiler):
"""Generate automatic features and compile OpenType tables from Adobe
Feature File stored in the UFO, using fontTools.feaLib as compiler.
"""
defaultFeatureWriters = [KernFeatureWriter, MarkFeatureWriter]
def __init__(self, ufo, ttFont=None, glyphSet=None, featureWriters=None, **kwargs):
"""
Args:
featureWriters: a list of BaseFeatureWriter subclasses or
pre-initialized instances. The default value (None) means that:
- first, the UFO lib will be searched for a list of featureWriters
under the key "com.github.googlei18n.ufo2ft.featureWriters"
(see loadFeatureWriters).
- if that is not found, the default list of writers will be used:
[KernFeatureWriter, MarkFeatureWriter]. This generates "kern"
(or "dist" for Indic scripts), "mark" and "mkmk" features.
If the featureWriters list is empty, no automatic feature is
generated and only pre-existing features are compiled.
"""
BaseFeatureCompiler.__init__(self, ufo, ttFont, glyphSet)
self.initFeatureWriters(featureWriters)
if kwargs.get("mtiFeatures") is not None:
import warnings
warnings.warn(
"mtiFeatures argument is ignored; "
"you should use MtiLibFeatureCompiler",
category=UserWarning,
stacklevel=2,
)
def initFeatureWriters(self, featureWriters=None):
""" Initialize feature writer classes as specified in the UFO lib.
If none are defined in the UFO, the default feature writers are used:
currently, KernFeatureWriter and MarkFeatureWriter.
The 'featureWriters' argument can be used to override these.
The method sets the `self.featureWriters` attribute with the list of
writers.
Note that the writers that generate GSUB features are placed first in
this list, before all others. This is because the GSUB table may be
used in the subsequent feature writers to resolve substitutions from
glyphs with unicodes to their alternates.
"""
if featureWriters is None:
featureWriters = loadFeatureWriters(self.ufo)
if featureWriters is None:
featureWriters = self.defaultFeatureWriters
gsubWriters = []
others = []
for writer in featureWriters:
if isclass(writer):
writer = writer()
if writer.tableTag == "GSUB":
gsubWriters.append(writer)
else:
others.append(writer)
self.featureWriters = gsubWriters + others
def setupFeatures(self):
"""
Make the features source.
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
"""
if self.featureWriters:
featureFile = parseLayoutFeatures(self.ufo)
for writer in self.featureWriters:
writer.write(self.ufo, featureFile, compiler=self)
# stringify AST to get correct line numbers in error messages
self.features = featureFile.asFea()
else:
# no featureWriters, simply read existing features' text
self.features = tounicode(self.ufo.features.text or "", "utf-8")
def writeFeatures(self, outfile):
if hasattr(self, "features"):
outfile.write(self.features)
def buildTables(self):
"""
Compile OpenType feature tables from the source.
Raises a FeaLibError if the feature compilation was unsuccessful.
**This should not be called externally.** Subclasses
may override this method to handle the table compilation
in a different way if desired.
"""
if not self.features:
return
# the path is used by the lexer to follow 'include' statements;
# if we generated some automatic features, includes have already been
# resolved, and we work from a string which doesn't exist on disk
path = self.ufo.path if not self.featureWriters else None
try:
addOpenTypeFeaturesFromString(self.ttFont, self.features, filename=path)
except FeatureLibError:
if path is None:
# if compilation fails, create temporary file for inspection
data = tobytes(self.features, encoding="utf-8")
with NamedTemporaryFile(delete=False) as tmp:
tmp.write(data)
logger.error("Compilation failed! Inspect temporary file: %r", tmp.name)
raise
class MtiFeatureCompiler(BaseFeatureCompiler):
""" Compile OpenType layout tables from MTI feature files using
fontTools.mtiLib.
"""
def setupFeatures(self):
ufo = self.ufo
features = {}
# includes the length of the "/" separator
prefixLength = len(MTI_FEATURES_PREFIX) + 1
for fn in ufo.data.fileNames:
if fn.startswith(MTI_FEATURES_PREFIX) and fn.endswith(".mti"):
content = tounicode(ufo.data[fn], encoding="utf-8")
features[fn[prefixLength:-4]] = content
self.mtiFeatures = features
def buildTables(self):
for tag, features in self.mtiFeatures.items():
table = mtiLib.build(features.splitlines(), self.ttFont)
assert table.tableTag == tag
self.ttFont[tag] = table
|
Torneo-Tipografico-Comunidad/Torneo-2020
|
Calmadita /05_SOURCES/sources/venv/lib/python3.7/site-packages/ufo2ft/featureCompiler.py
|
featureCompiler.py
|
py
| 10,497 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "fontTools.misc.py23.tounicode",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ufo2ft.featureWriters.ast.FeatureFile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "ufo2ft.featureWriters.ast",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "fontTools.misc.py23.UnicodeIO",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "fontTools.feaLib.parser.Parser",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "fontTools.feaLib.error.IncludedFeaNotFound",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "fontTools.ttLib.TTFont",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "ufo2ft.util.makeOfficialGlyphOrder",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "ufo2ft.featureWriters.KernFeatureWriter",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "ufo2ft.featureWriters.MarkFeatureWriter",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "{'TTFont': 'fontTools.ttLib.TTFont', 'makeOfficialGlyphOrder': 'ufo2ft.util.makeOfficialGlyphOrder'}.__init__",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "ufo2ft.featureWriters.loadFeatureWriters",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "inspect.isclass",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "fontTools.misc.py23.tounicode",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "fontTools.feaLib.builder.addOpenTypeFeaturesFromString",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "fontTools.feaLib.error.FeatureLibError",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "fontTools.misc.py23.tobytes",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "ufo2ft.constants.MTI_FEATURES_PREFIX",
"line_number": 274,
"usage_type": "argument"
},
{
"api_name": "ufo2ft.constants.MTI_FEATURES_PREFIX",
"line_number": 276,
"usage_type": "argument"
},
{
"api_name": "fontTools.misc.py23.tounicode",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "fontTools.mtiLib.build",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "fontTools.mtiLib",
"line_number": 283,
"usage_type": "name"
}
] |
26260800957
|
import pygame
class Item:
def __init__(self, image, x, y, id, player):
self.image = pygame.image.load(image)
self.x, self.y = x, y
self.player = player
self.id = id
def draw(self, screen):
screen.blit(self.image, (self.x, self.y))
def onPlayerCollision(self):
pygame.mixer.music.load("other\\itempickup.wav")
pygame.mixer.music.play(1)
if self.id == 1:
self.player.hasSnorkel = True
if self.id == 2:
self.player.hasMachete = True
self.player.attack_damage = 5
if self.id == 3:
self.player.hasBombs = True
self.player.bombs = 15
|
SeaPickle754/zeldaish
|
item.py
|
item.py
|
py
| 588 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.image.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 14,
"usage_type": "attribute"
}
] |
20546789103
|
import discord
from discord.ext import commands
from discord.ext.commands import Command
from chime.main import prefix
from chime.misc.CustomCommand import CustomCommand
from chime.misc.StyledEmbed import StyledEmbed
class EmbedHelpCommand(commands.HelpCommand):
"""This is an example of a HelpCommand that utilizes embeds.
It's pretty basic but it lacks some nuances that people might expect.
1. It breaks if you have more than 25 cogs or more than 25 subcommands. (Most people don't reach this)
2. It doesn't DM users. To do this, you have to override `get_destination`. It's simple.
Other than those two things this is a basic skeleton to get you started. It should
be simple to modify if you desire some other behaviour.
To use this, pass it to the bot constructor e.g.:
bot = commands.Bot(help_command=EmbedHelpCommand())
"""
# Set the embed colour here
COLOUR = discord.colour.Color.from_rgb(r=255, g=197, b=84)
def get_command_signature(self, command):
return '{0.qualified_name} {0.signature}'.format(command)
async def send_bot_help(self, mapping):
embed = StyledEmbed(title='chime help')
embed.set_thumbnail(url="https://raw.githubusercontent.com/realmayus/chime/master/assets/chime_banner.png?token=AJC6B5VTHEZ5UHNY7QNDCU263LCCK")
embed.description = "chime is a versatile, yet intuitive music bot for discord. It aims to be as user-friendly as possible while still boasting many features. \n\n" \
"**More info and invite link [here](https://chime.realmayus.xyz)** \n\n" \
"Chime has a **web app** where you can manage and set up personal playlists and manage settings of your servers! https://chime.realmayus.xyz \n\n" \
"**Use** `" + self.clean_prefix + "help [command]` **for more info on a command.**"
for cog, commands in mapping.items():
if cog is not None: # We don't want commands without categories! >:c
name = cog.qualified_name
filtered = await self.filter_commands(commands, sort=True)
if filtered:
builder = []
for command in commands: # filtering out hidden commands
command: Command
builder.append(f"`{prefix + command.name}`" if not command.hidden else "")
value = ' '.join(builder)
if cog and cog.description:
value = '{0}\n{1}'.format(cog.description, value)
embed.add_field(name=name, value=value)
await self.get_destination().send(embed=embed)
async def send_cog_help(self, cog):
pass
async def send_group_help(self, group: CustomCommand):
embed = StyledEmbed(title='`' + group.qualified_name + '`')
desc = ""
if group.help:
desc += group.help
if group.usage:
embed.add_field(name="**Usage**", value=f"`{prefix + group.usage}`", inline=False)
if group.aliases and len(group.aliases) > 0:
embed.add_field(name="**Aliases**", value=' '.join([f"`{prefix + alias}`" for alias in group.aliases]), inline=False)
if hasattr(group, "available_args") and group.available_args:
arg_builder = ""
for typ in group.available_args:
arg_builder += f"\n**{typ['type']}**"
for arg in typ['args']:
arg_builder += f"\n`{arg['name']}`\n***{arg['desc']}***"
embed.add_field(name="**Arguments**", value=arg_builder)
if hasattr(group, "examples") and group.examples:
example_builder = ""
for ex in group.examples:
example_builder += f"\n`{ex['ex']}`\n{ex['desc']}"
embed.add_field(name="**Examples**", value=example_builder)
embed.description = desc
await self.get_destination().send(embed=embed)
# This makes it so it uses the function above
# Less work for us to do since they're both similar.
# If you want to make regular command help look different then override it
send_command_help = send_group_help
|
realmayus/chime
|
chime/cogs/HelpCommandCog.py
|
HelpCommandCog.py
|
py
| 4,233 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "discord.ext.commands.HelpCommand",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "discord.colour.Color.from_rgb",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "discord.colour",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "chime.misc.StyledEmbed.StyledEmbed",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "discord.ext.commands",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Command",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "chime.main.prefix",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "chime.misc.CustomCommand.CustomCommand",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "chime.misc.StyledEmbed.StyledEmbed",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "chime.main.prefix",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "chime.main.prefix",
"line_number": 65,
"usage_type": "name"
}
] |
19202891943
|
#!/usr/bin/python3
"""
A module that include the parent class Basemodel
"""
import models
from datetime import datetime
from uuid import uuid4
class BaseModel:
"""
class BaseModel that defines all common attributes/methods
for other classes.
"""
def __init__(self, *args, **kwargs):
"""
__init__ method to initialize an object during its creation
Args:
*args: tuple of arguments
**kwargs: key/value variables
"""
self.id = str(uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
if kwargs:
for key, value in kwargs.items():
if key != '__class__':
if key in ['created_at', 'updated_at']:
fformat = '%Y-%m-%dT%H:%M:%S.%f'
"""convert the string values to datetime objects"""
value = datetime.strptime(value, fformat)
setattr(self, key, value)
""" ^^^this is like self.key = value
and this line solves the purpose of
each value of this dictionary is the value of
this attribute name
"""
else:
self.id = str(uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
models.storage.new(self)
def __str__(self):
"""
__str__: should print: [<class name>] (<self.id>) <self.__dict__>
"""
class_name = self.__class__.__name__
return "[{}] ({}) {}".format(class_name, self.id, self.__dict__)
def save(self):
"""
save(self): updates the public instance attribute
updated_at with the current datetime
"""
self.updated_at = datetime.now()
models.storage.save()
def to_dict(self):
"""
returns a dictionary containing all keys/values
of __dict__ of the instance
"""
c_dict = self.__dict__.copy()
c_dict['__class__'] = self.__class__.__name__
c_dict['created_at'] = self.created_at.isoformat()
c_dict['updated_at'] = self.updated_at.isoformat()
return c_dict
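# Usage sketch (added for illustration, not part of the original module; it assumes the
# surrounding AirBnB-clone `models` package with its `storage` engine is importable):
# to_dict() yields a serializable dict whose ISO timestamps can be passed back
# through **kwargs to rebuild an equivalent instance.
if __name__ == "__main__":
    obj = BaseModel()
    obj.name = "example"
    data = obj.to_dict()
    clone = BaseModel(**data)
    print(obj.id == clone.id)               # True - same id restored from kwargs
    print(type(clone.created_at).__name__)  # datetime - ISO strings parsed back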
|
sanotogii/AirBnB_clone
|
models/base_model.py
|
base_model.py
|
py
| 2,261 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "uuid.uuid4",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "models.storage.new",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "models.storage.save",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "models.storage",
"line_number": 62,
"usage_type": "attribute"
}
] |
18801821077
|
import requests
requests.packages.urllib3.disable_warnings() # noqa
url = "https://api.github.com/repos/milvus-io/milvus/actions/workflows"
payload = {}
token = "" # your token
headers = {
"Authorization": f"token {token}",
}
response = requests.request("GET", url, headers=headers, data=payload)
def analysis_workflow(workflow_name, workflow_response):
"""
Used to count the number of successes and failures of jobs in the chaos test workflow,
so as to understand the robustness of different components (each job represents a component).
"""
workflow_id = [w["id"] for w in workflow_response.json()["workflows"] if workflow_name in w["name"]][0]
runs_response = requests.request("GET", f"https://api.github.com/repos/milvus-io/milvus/actions/workflows/{workflow_id}/runs", headers=headers, data=payload, verify=False)
workflow_runs = [r["id"] for r in runs_response.json()["workflow_runs"] if r["status"] == "completed" and r["event"] == "schedule"]
results = {}
for run in workflow_runs:
job_url = f"https://api.github.com/repos/milvus-io/milvus/actions/runs/{run}/jobs"
job_response = requests.request("GET", job_url, headers=headers, data=payload, verify=False)
for r in job_response.json()["jobs"]:
if r["name"] not in results:
results[r["name"]] = {"success": 0, "failure": 0}
if r["status"] == "completed" and r["conclusion"] == "success":
results[r["name"]]["success"] += 1
elif r["status"] == "completed" and r["conclusion"] != "success":
results[r["name"]]["failure"] += 1
return results
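# Shape of the returned dict (hypothetical job names, shown only for illustration):
# {"test-querynode-pod-kill": {"success": 12, "failure": 1},
#  "test-datanode-pod-kill": {"success": 11, "failure": 2}, ...}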
for workflow in ["Pod Kill"]:
result = analysis_workflow(workflow, response)
print(f"{workflow}:")
for k, v in result.items():
print(f"{k} success: {v['success']}, failure: {v['failure']}")
print("\n")
|
milvus-io/milvus
|
tests/python_client/chaos/scripts/workflow_analyse.py
|
workflow_analyse.py
|
py
| 1,895 |
python
|
en
|
code
| 24,190 |
github-code
|
6
|
[
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "requests.request",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 25,
"usage_type": "call"
}
] |
29285921224
|
from rply import LexerGenerator # type: ignore
# lexer for the "calc" command
lg = LexerGenerator()
lg.add('NUMBER', r'[0-9]+[\.]?[0-9]*') # number token
lg.add('ADDITION', r'\+') # operator tokens
lg.add('SUBTRACTION', r'-')
lg.add('MULTIPLICATION', r'\*')
lg.add('EXPONENT', r'\^')
lg.add('DIVISION', r'\/')
lg.add('FIRST_BRACKETS', r'\[') # grouping tokens
lg.add('SECOND_BRACKETS', r'\]')
lg.add('FIRST_BRACES', r'\{')
lg.add('SECOND_BRACES', r'\}')
lg.add('FIRST_PARENTHESIS', r'\(')
lg.add('SECOND_PARENTHESIS', r'\)')
lg.ignore('\s+') # ignore whitespace
l = lg.build()
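# Usage sketch (added for illustration, not part of the original module): rply's built
# lexer exposes lex(), and each token carries gettokentype() and getstr().
if __name__ == "__main__":
    for tok in l.lex("3 + 4 * (2 - 1)"):
        print(tok.gettokentype(), tok.getstr())
    # NUMBER 3, ADDITION +, NUMBER 4, MULTIPLICATION *, FIRST_PARENTHESIS (, ...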
|
Chrovo/Productivity
|
cogs/utils/lexer.py
|
lexer.py
|
py
| 582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rply.LexerGenerator",
"line_number": 4,
"usage_type": "call"
}
] |
22021920480
|
'''
Jessica Dutton
Store Manager (user entity)
'''
from google.cloud import datastore
from flask import Blueprint, Flask, request, make_response
import json
import constants
from google.oauth2 import id_token
from google.auth.transport import requests
client = datastore.Client()
bp = Blueprint('store_manager', __name__, url_prefix='/store_managers')
@bp.route('', methods=['GET'])
def store_managers_get():
if request.method == 'GET':
store_managers = []
if 'application/json' not in request.accept_mimetypes:
response = {"Error": "Response type must be JSON"}
res = make_response(response)
res.mimetype = 'application/json'
res.status_code = 406
return res
# get the dvds
query = client.query(kind=constants.dvds)
results = list(query.fetch())
for dvd in results:
if dvd['store_manager'] not in store_managers:
store_managers.append(dvd['store_manager'])
if len(store_managers) != 0:
response = {"List of store manager IDs": store_managers}
else:
response = {"Results" : "No store managers currently registered in app"}
return (response, 200)
else:
return 'Method not recognized'
|
jdutt25/dvd_store
|
store_manager.py
|
store_manager.py
|
py
| 1,310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "google.cloud.datastore.Client",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "google.cloud.datastore",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.Blueprint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.request.accept_mimetypes",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.make_response",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "constants.dvds",
"line_number": 29,
"usage_type": "attribute"
}
] |
3670374638
|
import math
from collections import defaultdict
from typing import Union, Callable, Tuple, List
import gevent
from gevent.queue import Queue
from requests import PreparedRequest, Response
from lib.utils.logger import Logger
from lib.utils.request_helper import RequestHelper, RequestInfo
class BaseFinder(RequestHelper):
# Format:
# {'example.com:8443': {'some_bucket': {'size': Union[int, None], 'in_progress': Union[bool, None]}, ...}, ...}
bucket_size_cache = defaultdict(lambda: defaultdict(dict))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def determine_bucket_size(self, info: RequestInfo):
raise NotImplementedError
def find_secrets(self, info: RequestInfo, words: List[str]):
""" Проверяет изменения в ответе для заданного списка параметров `words` в теле запроса
:param info:
:param words: Названия параметров
:return: dict([(`param`, `reasons`)]) - если найдено конкретное слово
int - если со словами требуется провести манипуляции
"""
raise NotImplementedError
def get_bucket_size(self, info: RequestInfo):
""" Возвращает общие число хидеров в запросе """
raise NotImplementedError
def get_word_chunks(self, info: RequestInfo):
raise NotImplementedError
def is_info_searchable(self, info: RequestInfo):
raise NotImplementedError
def set_bucket_size(self, info: RequestInfo):
""" Устанавивает для запроса в `info` общее число хидеров """
raise NotImplementedError
def setup_requests_info(self, info_list: List[RequestInfo]):
raise NotImplementedError
def do_request(self, prepared_request: PreparedRequest, **kwargs) -> Union[Response, None]:
""" Выполняет подготовленных запрос с отчисткой промежуточного кэша
:param prepared_request:
:return: `None` - если по истечении `self.retry` попыток не удалось получить ответ от сервера
`requests.Response` - если удалось получить ответ от сервера
"""
return super().do_request(prepared_request, self.retry, self.timeout, self.delay, self.proxies,
self.arguments.allow_redirects, self.logger)
def filter_requests(self, *args, **kwargs):
kwargs.update({'logger': self.logger})
return super().filter_requests(*args, **kwargs)
def get_optimal_bucket(self, info: RequestInfo, min_chunk: int, add_random: Callable,
additional_size: Callable, logger: Logger) -> Union[int, None]:
""" Ищет оптимальный размер порции параметров соотношение (Длина порции) / (время ответа)
:param info:
:return:
"""
left, cur, right = 1024, 2048, 4096
left_border = 0
right_border = math.inf
counter = 5
optimal_size = None
optimal_rate = 0
# Limit on the number of loop iterations
while counter:
counter -= 1
# If the left bound has dropped to zero
if left == 0:
break
# If the range can no longer be split, stop the loop
if right - cur < 2 or cur - left < 2:
break
# Prepare the requests
_requests = [info.copy_request() for _ in range(3)]
for request, length in zip(_requests, [left, cur, right]):
add_random(request, length)
# Send them
jobs = [gevent.spawn(self.do_request, request) for request in _requests]
gevent.joinall(jobs)
responses = [job.value for job in jobs]
# Collect the results
results = []
# results = [response.status_code == info.response.status_code if response is not None else response
# for response in responses]
for response in responses:
if not response:
results.append(None)
# If the response status codes match
elif response.status_code == info.response.status_code:
results.append(True)
# If Payload Too Large / URI Too Long / Request Header Fields Too Large
elif response.status_code in {413, 414, 431}:
results.append(False)
# If the status code is in [500, 599] while the original code is not
elif 500 <= response.status_code < 600 and not 500 <= info.response.status_code < 600:
results.append(False)
# If the status code is in [400, 499] while the original code is not
elif 400 <= response.status_code < 500 and not 400 <= info.response.status_code < 500:
results.append(False)
else:
logger.debug(f'Unhandled case: act_status_code={response.status_code}, orig_status_code={info.response.status_code}')
results.append(True)
# If none of the requests received a response from the server, shift to the left
if not any(results):
right_border = left
right = right_border
cur = right >> 1
left = cur >> 1
continue
# Otherwise pick the optimal response among them
rates = []
for response, size, result in zip([response for response in responses], [left, cur, right], results):
# Only consider the cases where we stayed within the limits
elapsed = response.elapsed.total_seconds() if (response is not None and result == True) else math.inf
rate = round(size / elapsed, 1)
rates.append(rate)
if rate > optimal_rate and result:
optimal_rate = rate
optimal_size = size
# See in which direction the trend is moving
max_rate = max(rates)
# If none of the requests exceeded the limit, move in the direction of the trend
if all(results):
# If the trend grows on the left
if rates[0] == max_rate:
right_border = right
# then shift to the left
right = left - 1
cur = right >> 1
left = cur >> 1
# If the left pointer is below the left border
if left < left_border:
# then recompute the pointers within the borders
left, cur, right = self.shift_bounds(left_border, right_border)
# If the trend grows on the right
elif rates[2] == max_rate:
left_border = left
# then shift to the right
left = right + 1
cur = left << 1
right = cur << 1
# If the right pointer went beyond the right border
if right > right_border:
# then recompute the pointers within the borders
left, cur, right = self.shift_bounds(left_border, right_border)
# Otherwise examine the neighborhood of the center
else:
left_border = left if left > left_border else left_border
right_border = right if right < right_border else right_border
left = (left + cur) // 2
right = (cur + right) // 2
# If the results are [True, False|None, False|None]
elif results[0] == True and all([not r for r in results[1:]]):
right_border = cur if cur < right_border else right_border
# then shift to the left
right = left - 1
cur = right >> 1
left = cur >> 1
# If the results are [True, True, False|None]
elif results[2] in {None, False} and all([r for r in results[:2]]):
right_border = right if right < right_border else right_border
# then look at the trend on the left and in the middle
# If the trend grows on the left
if rates[0] == max_rate:
# then shift to the left
right = left - 1 # move the right bound under consideration to 1 below the previously considered left bound
cur = right >> 1
left = cur >> 1
# If the left pointer is below the left border
if left < left_border:
# then recompute the pointers within the borders
left, cur, right = self.shift_bounds(left_border, right_border)
# Otherwise keep digging within the neighborhood of cur
else:
right = round((cur + right) / 2)
left = (left + cur) // 2
else:
# Shift to the left
right = left - 1 # move the right bound under consideration to 1 below the previously considered left bound
cur = right >> 1
left = cur >> 1
# If the resulting optimal size is below the required minimum, return the required minimum
if optimal_size is not None:
if optimal_size < min_chunk < right_border:
return min_chunk + additional_size(info)
return optimal_size + additional_size(info)
return optimal_size
@staticmethod
def parse_results_queue(results_queue: Queue):
results = defaultdict(lambda: defaultdict(list))
while results_queue.qsize():
result = results_queue.get()
for param_name, value in result.items():
url, reasons, type, response = value['url'], value['reasons'], value['type'], value['response']
results[url][type].append({'param': param_name, 'reasons': reasons, 'response': response})
return results
@staticmethod
def parse_results(results: list):
_results = defaultdict(lambda: defaultdict(list))
for result in results:
for param_name, value in result.items():
url, reasons, type, response = value['url'], value['reasons'], value['type'], value['response']
_results[url][type].append({'param': param_name, 'reasons': reasons, 'response': response})
return _results
@staticmethod
def shift_bounds(left_bound: int, right_bound: int) -> Tuple[int, int, int]:
""" Сдвигает тройку `left`, `cur`, `right` согласно новым границам `left_bound` и `right_bound` """
cur = (left_bound + right_bound) // 2
left = (left_bound + cur) // 2
right = round((cur + right_bound) / 2)
return left, cur, right
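# Worked example (added for illustration): shift_bounds(0, 100) gives
# cur = (0 + 100) // 2 = 50, left = (0 + 50) // 2 = 25, right = round(75.0) = 75,
# i.e. the three probes are re-centered inside the new window as (25, 50, 75).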
@staticmethod
def update_results(results: dict, new_results: dict) -> dict:
""" Обновляет словарь `results` данными из `new_results`
:param results:
:param new_results:
:return:
"""
if not len(results.keys()):
results = defaultdict(lambda: defaultdict(list))
for url in new_results:
for type in new_results[url]:
for new_info in new_results[url][type]:
new_param, new_reasons, new_response = new_info['param'], new_info['reasons'], new_info['response']
results[url][type].append({'param': new_param, 'reasons': new_reasons, 'response': new_response})
return results
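# Illustrative sketch (not part of the original module): a standalone restatement of the
# pointer-recentring rule implemented by shift_bounds above, with a worked example.
# _shift_bounds_demo is a hypothetical copy added here purely for demonstration.
def _shift_bounds_demo(left_bound: int, right_bound: int):
    # Centre the middle pointer between the bounds, then place the side pointers
    # halfway between the centre and each bound, mirroring shift_bounds above.
    cur = (left_bound + right_bound) // 2
    left = (left_bound + cur) // 2
    right = round((cur + right_bound) / 2)
    return left, cur, right

# For bounds (100, 200) this yields (125, 150, 175): the search keeps probing three
# evenly spaced candidate sizes inside the current window.
assert _shift_bounds_demo(100, 200) == (125, 150, 175)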
|
medalahonor/suseeker
|
lib/finders/base_finder.py
|
base_finder.py
|
py
| 13,362 |
python
|
ru
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "lib.utils.request_helper.RequestHelper",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "requests.PreparedRequest",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "requests.Response",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "lib.utils.request_helper.RequestInfo",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "lib.utils.logger.Logger",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "math.inf",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "gevent.spawn",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "gevent.joinall",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "math.inf",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "gevent.queue.Queue",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 270,
"usage_type": "call"
}
] |
24847039251
|
import pygame
class Character():
"""Create a character - inherits from the Sprite class"""
def __init__(self, screen, game_settings):
self.screen = screen
self.settings = game_settings
# Load the character image and get its rect
self.image = pygame.image.load("images/Robot.png")
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start the character at the bottom-left of the screen
self.rect.left = self.screen_rect.left
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the character's center
self.centerx = float(self.rect.centerx)
self.bottom = float(self.rect.bottom)
self.start_y = 0.0
# Movement flags
self.moving_right = False
self.moving_left = False
self.jumping = False
self.falling = False
# Get width of character for entering and exiting screen
self.width = self.rect.width
self.height = self.rect.height
def update(self):
"""Update the character's position on the screen"""
# Update center for float value
if self.moving_right and self.rect.right < self.screen_rect.right + self.width:
self.centerx += self.settings.movement_speed
if self.moving_left and self.rect.left > 0:
self.centerx -= self.settings.movement_speed
if self.jumping:
self.bottom -= self.settings.jumping_speed
if self.bottom <= (self.start_y - self.settings.jump_height):
self.jumping = False
if self.falling:
self.bottom += self.settings.gravity
# Update rect using new centerx and bottom values
self.rect.centerx = self.centerx
self.rect.bottom = self.bottom
def reset(self):
"""Reset the character to the bottom-left of the screen"""
self.centerx = self.width / 2
self.bottom = self.screen_rect.bottom
def blitme(self):
"""Draw the character at its present location"""
self.screen.blit(self.image, self.rect)
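# Illustrative driver sketch (not part of the original file). The _DemoSettings stub,
# window size and frame count below are assumptions made only so the example is
# self-contained; the real game supplies its own settings object and image assets
# (Character.__init__ expects "images/Robot.png" to exist).
if __name__ == "__main__":
    class _DemoSettings:
        movement_speed = 2.5
        jumping_speed = 4.0
        jump_height = 120
        gravity = 3.0

    pygame.init()
    demo_screen = pygame.display.set_mode((800, 600))
    hero = Character(demo_screen, _DemoSettings())
    hero.moving_right = True
    for _ in range(60):                      # simulate one second at 60 updates
        hero.update()
    print("character x position:", hero.rect.centerx)
    pygame.quit()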
|
YorkshireStu83/Flatpormer
|
character.py
|
character.py
|
py
| 2,187 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.image.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 11,
"usage_type": "attribute"
}
] |
36807817632
|
import numpy as np
from scipy import signal
from scipy.signal import butter, lfilter
def createSpec(signals, sr, n_channels=22):
# Reference: https://github.com/MesSem/CNNs-on-CHB-MIT, DataSetToSpectrogram
n_channels = min(n_channels, 22)
for channel in range(n_channels):
y = signals[channel]
Pxx = signal.spectrogram(y, nfft=sr, fs=sr, return_onesided=True, noverlap=128)[2]
Pxx = np.delete(Pxx, np.s_[117:123 + 1], axis=0)
Pxx = np.delete(Pxx, np.s_[57:63 + 1], axis=0)
Pxx = np.delete(Pxx, 0, axis=0)
result = ((10 * np.log10(Pxx).T - (10 * np.log10(Pxx)).T.mean(axis=0)) / (10 * np.log10(Pxx)).T.std(axis=0))
if channel == 0:
spect = np.zeros((n_channels, *result.shape))
result = np.nan_to_num(result)
spect[channel] = result
return spect
# Band-stop filter
def butter_bandstop_filter(data, lowcut, highcut, fs, order):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
i, u = butter(order, [low, high], btype='bandstop')
y = lfilter(i, u, data)
return y
# High-pass filter
def butter_highpass_filter(data, cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='high', analog=False)
y = lfilter(b, a, data)
return y
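# Illustrative usage sketch (not part of the original module), run on synthetic data.
# The sampling rate, channel count and cut-off frequencies below are assumptions chosen
# only to exercise createSpec and the two Butterworth helpers.
if __name__ == "__main__":
    sr = 256                                    # assumed sampling rate in Hz
    t = np.arange(0, 10, 1 / sr)                # 10 s of signal per channel
    fake_eeg = np.array([np.sin(2 * np.pi * (5 + ch) * t) for ch in range(22)])

    spec = createSpec(fake_eeg, sr)
    print("spectrogram tensor shape:", spec.shape)   # (channels, time bins, freq bins)

    notched = butter_bandstop_filter(fake_eeg[0], lowcut=57, highcut=63, fs=sr, order=4)
    highpassed = butter_highpass_filter(fake_eeg[0], cutoff=1.0, fs=sr, order=5)
    print("filtered lengths:", len(notched), len(highpassed))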
|
koike-ya/eeglibrary
|
eeglibrary/src/chb_mit_cnn_spectrogram.py
|
chb_mit_cnn_spectrogram.py
|
py
| 1,357 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "scipy.signal.spectrogram",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.delete",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.s_",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.s_",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.delete",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.nan_to_num",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.signal.butter",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lfilter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "scipy.signal.butter",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lfilter",
"line_number": 46,
"usage_type": "call"
}
] |
8017799306
|
import tkinter as tk
from tkinter import ttk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy as np
class GraphPlt():
def __init__(self):
"""インスタンス化。Figureの作成
"""
self.fig = Figure(figsize=(6,6))
def plt(self, title="plot graph",topLevel=True,kind_grid=None, is_legend = False,is_twin=False, rotate_xlim=0,show_bar=20):
"""作成したグラフのプロット
Args:
title (str, optional): ウィンドウのタイトル. Defaults to "plot graph".
topLevel (bool, optional): ウィンドウをトップレベルにするか. Defaults to True.
kind_grid (str, optional): グリッドの有無と方向. Defaults to None.
is_legend (bool, optional): 凡例の有無. Defaults to False.
is_twin (bool, optional): 2軸グラフか否か. Defaults to False.
rotate_xlim (int, optional): x軸ラベルの角度. Defaults to 0.
show_bar (int, optional): スクロールのスケール. Defaults to 20.
"""
if topLevel:
frame = tk.Toplevel()
else:
frame = tk.Tk()
frame.focus_force()
frame.title(title)
canvasFrame = tk.Frame(frame)
canvasFrame.pack(side=tk.TOP)
controlFrame = tk.Frame(frame)
controlFrame.pack(side=tk.BOTTOM)
canvas = FigureCanvasTkAgg(self.fig, canvasFrame)
tmp = canvas.get_tk_widget()
tmp.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
showbars = show_bar
if kind_grid is not None:
self.ax.grid(axis = kind_grid,linestyle="--")
if is_legend:
self.ax.legend()
if is_twin:
self.ax.legend(self.twin_label_handle[0][0]+self.twin_label_handle[1][0],self.twin_label_handle[0][1]+self.twin_label_handle[1][1],loc=2,borderaxespad=0.)
self.fig.autofmt_xdate(rotation=rotate_xlim)
def draw_plot(pos):
pos_ = float(pos)
self.ax.set_xlim(pos_-1, pos_+showbars+1)
canvas.draw()
y_scale = ttk.Scale(controlFrame, from_=0.0, to=self.data_len-showbars, length=480, orient=tk.HORIZONTAL, command=draw_plot)
y_scale.pack(fill=tk.X)
btn = tk.Button(controlFrame, text="閉じる", command = frame.destroy)
btn.pack()
draw_plot(0)
def make_graph(self, x, y, graph_type,color = None,marker = None,label=None):
"""最初のグラフの作成
Args:
x (list)): x軸のデータ
y (list): y軸のデータ
graph_type (str)): グラフの種類
color (str, optional): グラフの色. Defaults to None.
label (str, optional): グラフのラベル. Defaults to None.
"""
self.axes_set()
self.handler, self.label = self.graph_set(self.ax,x, y, graph_type,color,marker,label)
def add_graph(self, x, y, graph_type,color = None,marker=None,label=None):
"""グラフの追加
Args:
x (list): x軸のデータ
y (list): y軸のデータ
graph_type (str)): グラフの種類
color (str, optional): グラフの色. Defaults to None.
label (str, optional): グラフのラベル. Defaults to None.
"""
self.graph_set(self.ax,x, y, graph_type,color,marker,label)
def twin_axes(self):
"""2軸グラフにする
"""
self.ax_t = self.ax.twinx()
self.twin_label_handle=list()
self.twin_label_handle.append((self.handler,self.label))
def twin_add_graph(self,x,y,graph_type,color=None, marker = None,label=None):
"""2軸グラフに新たなデータの追加
Args:
x (list): x軸のデータ
y (list)): y軸のデータ
graph_type (str): グラフの種類
color (str, optional): グラフの色. Defaults to None.
label (str, optional): グラフのラベル. Defaults to None.
"""
handler, labels=self.graph_set(self.ax_t,x,y,graph_type,color,marker,label)
self.twin_label_handle.append((handler,labels))
def axes_set(self):
"""新たなグラフの追加
"""
self.ax = self.fig.add_subplot(111)
def graph_set(self,ax,x,y,graph_type,color,marker,label):
"""グラフのセット
Args:
ax (plt.axes): 追加先のグラフ
x (list): x軸のデータ
y (list): y軸のデータ
graph_type (str): グラフの種類
color (str): グラフの色
label (str): グラフのラベル
Returns:
[type]: [description]
"""
g = self._graph_select(ax,graph_type)
if graph_type=="line":
g(x,y,color=color,label=label,marker=marker)
else:
g(x,y,color=color,label=label)
self.data_len = len(x)
handler, label = ax.get_legend_handles_labels()
return handler, label
def set_span_xlim(self,span,last=0,first=0,delta = None):
"""x軸の設定
Args:
span (int): 間隔
last (int, optional): 最後の数値. Defaults to 0.
first (int, optional): 最初のデータ. Defaults to 0.
delta (int, optional): last-firstの値. Defaults to None.
"""
if delta is None:
xtick = [first]
xtick.extend([i * span for i in range(1, (( last-first -1)//span) + 1)])
else:
xtick = [1]
xtick.extend([i * span for i in range(1, ((delta-1)//span) + 1)])
self.ax.set_xticks(xtick)
def _graph_select(self,ax,graph_type):
"""graph_typeから適するグラフメソッドを返す
Args:
ax (plt.axes): 対象のグラフ
graph_type (str): グラフタイプ.line:折れ線, bar:棒
Returns:
plt.axes.method: 適切なグラフメソッド
"""
if graph_type == "bar":
return ax.bar
elif graph_type == "line":
return ax.plot
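# Illustrative usage sketch (not part of the original module). It assumes a desktop
# environment with Tk available; the data, colours and labels are made up for demonstration.
if __name__ == "__main__":
    xs = list(range(50))
    ys = [v ** 0.5 for v in xs]
    demo = GraphPlt()
    demo.make_graph(xs, ys, "line", color="tab:blue", label="sqrt(x)")
    demo.add_graph(xs, [v / 10 for v in xs], "bar", color="tab:orange", label="x/10")
    # topLevel=False makes plt() create its own Tk root window
    demo.plt(title="GraphPlt demo", topLevel=False, kind_grid="y", is_legend=True)
    tk.mainloop()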
|
komepi/komepiTkinter
|
komepiTkinter/GraphPlt.py
|
GraphPlt.py
|
py
| 6,239 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.figure.Figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tkinter.Toplevel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tkinter.TOP",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tkinter.BOTTOM",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tkinter.TOP",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "tkinter.BOTH",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "tkinter.ttk.Scale",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "tkinter.HORIZONTAL",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "tkinter.X",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 56,
"usage_type": "call"
}
] |
71781182589
|
import glob
import os
from datetime import datetime
import cv2
# isResize = False
isResize = True # change this to False, if resize is not required
images = []
image_src_folder = "images"
files = glob.glob(image_src_folder+"/*")
for file in files:
print(f"Processing: {os.path.basename(file)}")
if file.endswith(".jpg") or file.endswith(".png"):
image = cv2.imread(file)
print(f"original image dimension: {image.shape}") # image shape is displayed as height,width, channel
if isResize:
resize_scale = 0.45
image = cv2.resize(image, None, fx=resize_scale, fy=resize_scale, interpolation=cv2.INTER_AREA) # resize by scale
# image = cv2.resize(image, (640,480), interpolation=cv2.INTER_AREA) # resize by fixed output dimension
print(f"new image dimension: {image.shape}")
images.append(image)
# if your stitch doesn't go well, try STITCHER_SCANS
stitcher = cv2.Stitcher.create(mode=cv2.STITCHER_PANORAMA)
# stitcher = cv2.Stitcher.create(mode=cv2.STITCHER_SCANS)
ret, stitched = stitcher.stitch(images)
if ret == cv2.STITCHER_OK:
output_fn = f'{datetime.now().strftime("%Y%m%d_%H%M%S")}.png'
cv2.imshow('Panorama', stitched)
cv2.imwrite(output_fn, stitched)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
print("Error during Stitching")
|
yptheangel/opencv-starter-pack
|
python/examples/image_stitching/stitching.py
|
stitching.py
|
py
| 1,349 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "glob.glob",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.Stitcher.create",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.Stitcher",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.STITCHER_PANORAMA",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "cv2.STITCHER_OK",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "cv2.imshow",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 37,
"usage_type": "call"
}
] |
1698064062
|
import pygame
# define constants for the window size and stack element size
ELEMENT_WIDTH = 50
ELEMENT_HEIGHT = 200
# initialize Pygame
pygame.init()
# create a window
window = pygame.display.set_mode((1000, 500))
# define a font to use for displaying the stack elements
font = pygame.font.Font(None, 36)
# define a stack to hold the operands
stack = []
# define a list of tokens (operators and operands) in RPN
tokens = ["2", "3", "5", "*", "8", "+", "4", "2", "/", "-"]
# iterate through the tokens
for token in tokens:
# if the token is an operand, push it onto the stack and draw it on the window
if token.isdigit():
stack.append(int(token))
text = font.render(token, True, (255, 255, 255))
window.blit(text, (10, (len(stack) - 1) * ELEMENT_HEIGHT))
# if the token is an operator, pop the top two operands from the stack,
# perform the operation, and push the result back onto the stack
else:
right = stack.pop()
left = stack.pop()
if token == "+":
result = left + right
elif token == "-":
result = left - right
elif token == "*":
result = left * right
elif token == "/":
result = left / right
stack.append(result)
text = font.render(str(result), True, (255, 255, 255))
window.blit(text, (10, (len(stack) - 1) * ELEMENT_HEIGHT))
# update the window to show the new stack element
pygame.display.update()
# pause for a moment to allow the user to see the update
pygame.time.delay(500)
# the final result will be the top item on the stack
result = stack.pop()
text = font.render(str(result), True, (255, 255, 255))
window.blit(text, (10, (len(stack) - 1) * ELEMENT_HEIGHT))
pygame.display.update()
# run the Pygame loop until the user closes the window
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# quit Pygame
pygame.quit()
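# Illustrative sketch (not part of the original script): the same RPN evaluation without
# any Pygame drawing, to make the stack discipline easier to follow. eval_rpn is a
# hypothetical helper added here only for demonstration.
def eval_rpn(tokens):
    stack = []
    for token in tokens:
        if token.isdigit():
            stack.append(int(token))       # operands are pushed
            continue
        right = stack.pop()                # operators pop two operands...
        left = stack.pop()
        if token == "+":
            stack.append(left + right)
        elif token == "-":
            stack.append(left - right)
        elif token == "*":
            stack.append(left * right)
        elif token == "/":
            stack.append(left / right)     # ...and push the result back
    return stack.pop()

# With the token list used above the final pop yields 21.0 (note that the leading "2"
# is never consumed by an operator, so it simply stays on the stack).
assert eval_rpn(["2", "3", "5", "*", "8", "+", "4", "2", "/", "-"]) == 21.0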
|
Dhivyno/Programming-projects
|
All files/test.py
|
test.py
|
py
| 2,005 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.delay",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 64,
"usage_type": "call"
}
] |
26922909604
|
"""
Imports the various compute backends
"""
from typing import Set
from ..exceptions import InputError, ResourceError
from .cfour import CFOURHarness
from .dftd3 import DFTD3Harness
from .entos import EntosHarness
from .gamess import GAMESSHarness
from .molpro import MolproHarness
from .mopac import MopacHarness
from .mp2d import MP2DHarness
from .nwchem import NWChemHarness
from .psi4 import Psi4Harness
from .rdkit import RDKitHarness
from .terachem import TeraChemHarness
from .torchani import TorchANIHarness
__all__ = ["register_program", "get_program", "list_all_programs", "list_available_programs"]
programs = {}
def register_program(entry_point: 'ProgramHarness') -> None:
"""
Register a new ProgramHarness with QCEngine.
"""
name = entry_point.name
if name.lower() in programs.keys():
raise ValueError('{} is already a registered program.'.format(name))
programs[name.lower()] = entry_point
def unregister_program(name: str) -> None:
"""
Unregisters a given program.
"""
ret = programs.pop(name.lower(), None)
if ret is None:
raise KeyError(f"Program {name} is not registered with QCEngine")
def get_program(name: str, check: bool = True) -> 'ProgramHarness':
"""
Returns a program's executor class
Parameters
----------
check
If ``True``, raise an error when the program cannot be found. ``False`` is handy for
the specialized case of calling non-execution methods (like parsing for testing)
on the returned ``Harness``.
"""
name = name.lower()
if name not in programs:
raise InputError(f"Program {name} is not registered to QCEngine.")
ret = programs[name]
if check and not ret.found():
raise ResourceError(f"Program {name} is registered with QCEngine, but cannot be found.")
return ret
def list_all_programs() -> Set[str]:
"""
List all programs registered by QCEngine.
"""
return set(programs.keys())
def list_available_programs() -> Set[str]:
"""
List all programs that can be executed (found) by QCEngine.
"""
ret = set()
for k, p in programs.items():
if p.found():
ret.add(k)
return ret
register_program(Psi4Harness())
register_program(RDKitHarness())
register_program(TorchANIHarness())
register_program(MolproHarness())
register_program(MopacHarness())
register_program(DFTD3Harness())
register_program(TeraChemHarness())
register_program(MP2DHarness())
register_program(GAMESSHarness())
register_program(NWChemHarness())
register_program(CFOURHarness())
register_program(EntosHarness())
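# Illustrative sketch (not part of the original module): registering a hypothetical
# harness and looking it up again. DummyHarness and its methods are invented here purely
# to show the register/get flow; real harnesses implement the full ProgramHarness interface.
class DummyHarness:
    name = "dummy"

    @staticmethod
    def found() -> bool:
        return True

register_program(DummyHarness())
assert "dummy" in list_all_programs()
assert get_program("dummy", check=True) is not None
unregister_program("dummy")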
|
ChemRacer/QCEngine
|
qcengine/programs/base.py
|
base.py
|
py
| 2,624 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "exceptions.InputError",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "exceptions.ResourceError",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "typing.Set",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "psi4.Psi4Harness",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "rdkit.RDKitHarness",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torchani.TorchANIHarness",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "molpro.MolproHarness",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "mopac.MopacHarness",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "dftd3.DFTD3Harness",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "terachem.TeraChemHarness",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "mp2d.MP2DHarness",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "gamess.GAMESSHarness",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "nwchem.NWChemHarness",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "cfour.CFOURHarness",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "entos.EntosHarness",
"line_number": 103,
"usage_type": "call"
}
] |
10795571321
|
"""
Starter code: the goal is a ball of fixed size that, given an initial speed and a constant drag, demonstrates the ball's motion. Each loop iteration corresponds to the real elapsed time.
Starting from this initial code, it demonstrates how to refine the code step by step until the feature is complete.
"""
import sys, pygame
import os.path
import random
import time
import math
#pygame.font.init()
#myfont = pygame.font.SysFont('Comic Sans MS', 30)
def hit_A(x, y):
return y <= 10
def hit_B(x, y):
return x >= 590
def hit_C(x, y):
return y >= 470
def hit_D(x, y):
return x <= 10
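# Illustrative check (not part of the original script, as noted here for clarity): the four
# helpers above treat the walls as A = top, B = right, C = bottom, D = left of a 600x480
# window, with a 10-pixel margin for the ball radius. The sample points are made up.
assert hit_B(595, 240) and hit_D(5, 240)      # right / left walls
assert hit_A(300, 5) and hit_C(300, 475)      # top / bottom walls
assert not any((hit_A(300, 240), hit_B(300, 240), hit_C(300, 240), hit_D(300, 240)))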
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
print(BASE_DIR)
pygame.init()
screen = pygame.display.set_mode((600, 480))
ball_color = [255, 0, 0]
x, y = random.randint(10, 590), random.randint(10, 390)
speed = random.randint(100, 300)
u = random.random()*math.pi
f = 20
t = time.time()
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.fill((255, 255, 255))
t1 = time.time()
dt = t1-t
d = speed*dt
dx, dy = d*math.cos(u), d*math.sin(u)
x += dx
y += dy
if hit_B(x, y):
u = math.pi - u
elif hit_C(x,y):
u = 2*math.pi - u
elif hit_D(x,y):
u = 3*math.pi - u
elif hit_A(x, y):
u = 2*math.pi - u
speed -= f*dt
if speed < 0:
speed = 0
# Move one step along the direction of travel
pygame.draw.circle(screen, ball_color, (int(x), int(y)), 10)
#textsurface = myfont.render(f'{dx} {dy}', False, (0, 0, 0))
#screen.blit(textsurface,(x,y))
pygame.display.flip()
t = t1
|
sillyemperor/pygame-study
|
rebound-ball-walls.py
|
rebound-ball-walls.py
|
py
| 1,622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.path.dirname",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.path.abspath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.event.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 70,
"usage_type": "attribute"
}
] |
24657079519
|
from django.contrib import admin
from django.urls import reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from . import models
import json
# Register your models here.
class BaseAdmin(admin.ModelAdmin):
list_per_page = 50
list_max_show_all = 200
show_full_result_count = False
preserve_filters = True
@admin.register(models.Statistics)
class StatisticsAdmin(BaseAdmin):
list_display = (
'id', 'jsonGlobalStatistics', 'jsonDomesticStatistics',
'jsonInternationalStatistics', 'modifyTime', 'crawlTime'
)
search_fields = ('crawlTime', 'modifyTime')
def jsonGlobalStatistics(self, obj):
return self.to_json(obj.globalStatistics)
jsonGlobalStatistics.short_description = _('globalStatistics')
jsonGlobalStatistics.admin_order_field = 'globalStatistics'
def jsonDomesticStatistics(self, obj):
return self.to_json(obj.domesticStatistics)
jsonDomesticStatistics.short_description = _('domesticStatistics')
jsonDomesticStatistics.admin_order_field = 'domesticStatistics'
def jsonInternationalStatistics(self, obj):
return self.to_json(obj.internationalStatistics)
jsonInternationalStatistics.short_description \
= _('internationalStatistics')
jsonInternationalStatistics.admin_order_field \
= 'internationalStatistics'
def to_json(self, data):
try:
data = json.loads(data)
except:
return
result = []
for k, v in sorted(data.items()):
result.append(format_html('{}: {}', k, v))
return mark_safe(format_html(
'<pre>{}</pre>', format_html('<br>'.join(result))))
@admin.register(models.City)
class CityAdmin(BaseAdmin):
list_display = (
'countryCode', 'provinceName', 'provinceCode', 'cityName',
'currentConfirmedCount', 'confirmedCount', 'suspectedCount',
'curedCount', 'deadCount', 'createTime', 'modifyTime'
)
search_fields = (
'cityName', 'countryCode', 'provinceCode', 'provinceName'
)
@admin.register(models.Province)
class ProvinceAdmin(BaseAdmin):
list_display = (
'countryCode', 'provinceName',
'currentConfirmedCount', 'confirmedCount', 'suspectedCount',
'curedCount', 'deadCount', 'createTime', 'modifyTime'
)
search_fields = ('provinceName', 'countryCode')
@admin.register(models.Country)
class CountryAdmin(BaseAdmin):
list_display = (
'continents', 'countryCode', 'countryName', 'countryFullName',
'currentConfirmedCount', 'confirmedCount', 'suspectedCount',
'curedCount', 'deadCount', 'createTime', 'modifyTime'
)
search_fields = (
'continents', 'countryFullName', 'countryCode', 'countryName'
)
@admin.register(models.CountryCode)
class CountryCodeAdmin(BaseAdmin):
list_display = (
'numericCode', 'countryCode', 'shortCountryCode', 'countryName',
'englishCountryName', 'englishCountryFullName', 'comment'
)
search_fields = (
'numericCode', 'countryCode', 'shortCountryCode', 'countryName',
'englishCountryName', 'englishCountryFullName', 'comment'
)
|
leafcoder/django-covid19
|
django_covid19/admin.py
|
admin.py
|
py
| 3,276 |
python
|
en
|
code
| 155 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.utils.html.format_html",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.utils.html.format_html",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "django.utils.html.format_html",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 97,
"usage_type": "name"
}
] |
36396180382
|
from ast import Lambda
from itertools import groupby
import pandas as pd
import pymysql
from pyecharts.charts import Bar, Line, Pie
from pyecharts import options as opts
conn = pymysql.connect(host="localhost", user="root",
passwd="123456", port=3306, db="591")
cursor = conn.cursor()
sql = 'select * from rent'
cursor.execute(sql)
result = cursor.fetchall()
df = pd.read_sql(sql, conn)
location_group = df.groupby(["location"])
location_com = location_group["price"].aggregate(["mean", "count"])
location_com.reset_index(inplace=True)
location_message = location_com.sort_values("count", ascending=False)
locationr = location_message["location"]
l1 = location_message["count"]
l2 = location_message["mean"]
l2 = ['%.2f' % i for i in l2.tolist()]
# print(l2)
# print(location_com)
# #%%
# # Bar and line chart of rental counts & average rent
bar = (
Bar(init_opts=opts.InitOpts(width="1200px", height="500px", theme='light'))
.add_xaxis(locationr.tolist())
.add_yaxis("房屋出租數", l1.tolist())
.extend_axis(
yaxis=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 元"), interval=10000
)
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="台北市各行政區出租房數&平均租金"),
yaxis_opts=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 間")),
)
)
line = Line().add_xaxis(locationr.tolist()).add_yaxis(
"平均租金", l2, yaxis_index=1)
bar.overlap(line)
bar.render("台北市各行政區出租房數&平均租金.html")
# Donut chart of rental floor-area distribution
square_info = df['size'].astype(float)
print(type(square_info[0]))
bins = [0, 10, 20, 40, 60, 100, 300]
level = ['0-10坪', '10-20坪', '20-40坪', '40-60坪', '60-100坪', '100-300坪']
square_stage = pd.cut(square_info, bins=bins,
labels=level).value_counts().sort_index()
attr = square_stage.index.tolist()
v1 = square_stage.values.tolist()
pie = (
Pie()
.add("", [list(z)for z in zip(attr, v1)], radius=[80, 150])
# Add percentage labels
.set_series_opts(label_opts=opts.LabelOpts(is_show=True, formatter="{d}%"))
.set_global_opts(title_opts=opts.TitleOpts(title="台北市出租房房屋面積分布",
pos_left="center",
pos_bottom="center"),
legend_opts=opts.LegendOpts(pos_left="left",
orient="vertical"),
))
pie.render("台北市出租房房屋面積分布.html")
# #%%
# Stacked bar chart of rental unit types
mask = (df["kind"] != "其他") # 過濾掉其他
location_group = df[mask].groupby("location")
location_values = [k[0] for k in location_group]
gp = df[mask].sort_values("location").groupby("kind")
s1 = gp.get_group("獨立套房")["location"].value_counts().tolist()
s2 = gp.get_group("分租套房")["location"].value_counts().tolist()
s3 = gp.get_group("雅房")["location"].value_counts().tolist()
s4 = gp.get_group("整層住家")["location"].value_counts().tolist()
# s5 = gp.get_group("車位")["location"].value_counts().tolist()
bar = (
Bar(init_opts=opts.InitOpts(width="1200px", height="500px", theme='light'))
.add_xaxis(location_values)
.add_yaxis("獨立套房", s1, stack="stack1")
.add_yaxis("分租套房", s2, stack="stack1")
.add_yaxis("雅房", s3, stack="stack1")
.add_yaxis("整層住家", s4, stack="stack1")
# .add_yaxis("車位", s5, stack="stack1")
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="房型分類"),
yaxis_opts=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 間")),
)
)
bar.render("台北市各行政區出租房型分類.html")
# -----------------------------------------
# Line/area chart of rent per ping
df["location"] = df["location"].apply(
lambda x: "".join([i for i in x if not i.isdigit()]))
df = (df[df["kind"].isin(["雅房", "整層住家", "獨立套房", "分租套房", "車位"])].
groupby(["location", "kind"])[["price", "size"]]
.sum()
.reset_index()
.sort_values("location"))
df.insert(4, column="average", value=df["price"]//df["size"])
line = (
Line()
.add_xaxis(location_values)
.add_yaxis("雅房", df.groupby("kind").get_group("雅房")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.5))
.add_yaxis("整層住家", df.groupby("kind").get_group("整層住家")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.4))
.add_yaxis("獨立套房", df.groupby("kind").get_group("獨立套房")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.3))
.add_yaxis("分租套房", df.groupby("kind").get_group("分租套房")["average"],
areastyle_opts=opts.AreaStyleOpts(opacity=0.2))
# .add_yaxis("車位", df.groupby("kind").get_group("車位")["average"],
# areastyle_opts=opts.AreaStyleOpts(opacity=0.1))
.set_global_opts(title_opts=opts.TitleOpts(title="各房型單坪租金"))
)
line.render("單坪租金圖.html")
# ------------------------------------------------------
|
dichotomania/project
|
visual.py
|
visual.py
|
py
| 5,336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymysql.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyecharts.charts.Bar",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyecharts.options.InitOpts",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.AxisOpts",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.LabelOpts",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.LabelOpts",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.TitleOpts",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.AxisOpts",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.LabelOpts",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pyecharts.charts.Line",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pandas.cut",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pyecharts.charts.Pie",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pyecharts.options.LabelOpts",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.TitleOpts",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.LegendOpts",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "pyecharts.charts.Bar",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pyecharts.options.InitOpts",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.LabelOpts",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.TitleOpts",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.AxisOpts",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.LabelOpts",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "pyecharts.charts.Line",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "pyecharts.options.AreaStyleOpts",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.AreaStyleOpts",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.AreaStyleOpts",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.AreaStyleOpts",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "pyecharts.options.TitleOpts",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pyecharts.options",
"line_number": 138,
"usage_type": "name"
}
] |
14564990421
|
import numpy as np
from IO.Pinocchio.ReadPinocchio import mf
import matplotlib.pyplot as plt
# Box Size
boxsize = 150.0
aux = mf("../TestRuns/pinocchio.1.9775.example.mf.out", "mf")
mf0 = mf("../TestRuns/pinocchio.1.9775.example.catalog.out", "catalog", 64, boxsize)
mf0.dndm_teo = np.interp(mf0.m, aux.m, aux.dndm_teo)
mf0.dndlnm_teo = np.interp(mf0.m, aux.m, aux.dndlnm_teo)
plt.loglog(mf0.m, mf0.dndlnm, label='dn/dm Pin.')
plt.loglog(mf0.m, mf0.dndlnm_teo, label='dn/dm Teo.')
plt.legend()
plt.show()
plt.plot(mf0.m, mf0.dndlnm/mf0.dndlnm_teo, label='dn/dlnm Pin.')
plt.axhline(1.0)
plt.ylim([0.5, 1.05])
plt.xscale('log')
plt.legend()
plt.show()
|
TiagoBsCastro/PITILESS-SLICER
|
Test/test_mf.py
|
test_mf.py
|
py
| 657 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "IO.Pinocchio.ReadPinocchio.mf",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "IO.Pinocchio.ReadPinocchio.mf",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.interp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.loglog",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.loglog",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axhline",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
}
] |
39122258660
|
from service_info import dp, bot, greeting, download_url, path_to_file_url, PhotoStatesGroup, dict_config, lang_dict
import aiohttp
from visor import tess_visor, easy_visor, keras_visor
from aiogram import types, executor
from keyboards import type_kb, lang_kb
from aiogram.dispatcher import FSMContext
import logging
import shutil
async def on_startup(_):
logging.info('bot was started')
@dp.message_handler(commands = ['start'])
async def command_start(message: types.Message):
logging.info('User sent a command /start')
await bot.send_message(chat_id=message.chat.id, text=greeting)
await bot.send_sticker(chat_id=message.chat.id, sticker='CAACAgIAAxkBAAEJO-Jkf5GZ-dCO4T3wGzzFjksgFB_JgwACYAIAAgvNDgNERok1XlXTOS8E')
await PhotoStatesGroup.photo.set()
@dp.message_handler(content_types=['photo'], state=PhotoStatesGroup.photo)
async def photo_handler(message: types.message, state: FSMContext):
logging.info('State: Photo ')
logging.info('The bot received the photo ')
picture = bytearray()
logging.info('Aiohttp request is processing')
async with aiohttp.ClientSession() as session:
resp = await session.get(download_url+message.photo[2].file_id)
resp = await resp.json(encoding='UTF-8')
async with session.get(path_to_file_url+resp['result']['file_path']) as responce:
async for chunk in responce.content.iter_chunked(64*1024):
picture += chunk
logging.info('Photo has been downloaded from Telegram Server')
async with state.proxy() as data:
data['photo'] = picture
logging.info('Photo saved in MemoryStorage')
await message.answer('Какой тип больше подходит данному тексту?', reply_markup=type_kb)
await PhotoStatesGroup.next()
@dp.message_handler(state=PhotoStatesGroup.type_photo)
async def type_picture(message: types.message, state: FSMContext):
logging.info('State: Mode for OCR')
logging.info('Mode for OCR was recieved')
if message.text in list(dict_config.keys()):
async with state.proxy() as data:
logging.info('Mode saved in MemoryStorage')
data['type'] = message.text
else:
await message.answer('Некорретный ответ, выбрал стандартный режим')
logging.warning('Was set a standart Mode. User sent uncorrect mode')
async with state.proxy() as data:
data['type'] = 'Однородный блок текста на картинке'
logging.info('Mode saved in MemoryStorage')
await message.answer('Какой язык на картинке?', reply_markup=lang_kb)
await PhotoStatesGroup.lang.set()
@dp.message_handler(state=PhotoStatesGroup.lang)
async def lang_handler(message: types.message, state: FSMContext):
logging.info('State: Language')
logging.info('Language was recieved')
if message.text in list(lang_dict.keys()):
async with state.proxy() as data:
data['lang'] = message.text
else:
await message.answer('Некорректный ответ. Выбран английский язык по умолчанию')
async with state.proxy() as data:
data['lang'] = 'Английский'
logging.info('Uncorrect language. Was set a stardart language')
logging.info('Language was saved in MemoryStorage')
await message.answer('Обработка... Это может занять минуту.')
logging.info("Was start a function 'Visor'. Data was sent to processing")
text = tess_visor(image=data['photo'], type_picture=data['type'], language=data['lang'])
await message.answer(f" {text[0]}\n Pytessart\n Time: {text[1]}")
text = easy_visor(image=data['photo'], type_picture=data['type'], language=data['lang'])
await message.answer(f" {text[0]}\n EasyOCR\n Time: {text[1]}")
text = keras_visor()
await message.answer(f" {text[0]}\n KerasOCR\n Time: {text[1]}")
shutil.rmtree('images')
await message.answer('Ожидаю следующую картинку!')
await PhotoStatesGroup.photo.set()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, filename="bot.log", filemode="w", format="%(asctime)s %(levelname)s %(message)s")
executor.start_polling(dispatcher=dp, skip_updates = True, on_startup=on_startup)
|
dfgion/VisorBot
|
bot.py
|
bot.py
|
py
| 4,388 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aiogram.types.Message",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "service_info.bot.send_message",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "service_info.bot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "service_info.greeting",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "service_info.bot.send_sticker",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "service_info.bot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "service_info.PhotoStatesGroup.photo.set",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "service_info.PhotoStatesGroup.photo",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "service_info.dp.message_handler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "service_info.dp",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "aiogram.types.message",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "service_info.download_url",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "service_info.path_to_file_url",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "keyboards.type_kb",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "service_info.PhotoStatesGroup.next",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "service_info.dp.message_handler",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "service_info.dp",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "service_info.PhotoStatesGroup.photo",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "aiogram.types.message",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "service_info.dict_config.keys",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "service_info.dict_config",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "keyboards.lang_kb",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "service_info.PhotoStatesGroup.lang.set",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "service_info.PhotoStatesGroup.lang",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "service_info.dp.message_handler",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "service_info.dp",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "service_info.PhotoStatesGroup.type_photo",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "aiogram.types.message",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "aiogram.dispatcher.FSMContext",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "service_info.lang_dict.keys",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "service_info.lang_dict",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "visor.tess_visor",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "visor.easy_visor",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "visor.keras_visor",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "shutil.rmtree",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "service_info.PhotoStatesGroup.photo.set",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "service_info.PhotoStatesGroup.photo",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "service_info.dp.message_handler",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "service_info.dp",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "service_info.PhotoStatesGroup.lang",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "service_info.PhotoStatesGroup",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "aiogram.executor.start_polling",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "aiogram.executor",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "service_info.dp",
"line_number": 83,
"usage_type": "name"
}
] |
13303759601
|
import requests, asyncio
from app import ACCESS_TOKEN, PYONET_API_URL, db
from app.tools.p3log import P3Log
class Poller:
def __init__(self):
self.p3log = P3Log("poller.log")
self.poll_task = None
self.devices = []
async def test_access_token(self):
try:
r = requests.get(f"{PYONET_API_URL}/auth/test/api_key", headers={"Authorization": f"{ACCESS_TOKEN}"})
r.raise_for_status()
self.p3log.log_success("Successfully authenticated with Pyonet-API")
return True
except requests.exceptions.ConnectionError as ce:
self.p3log.log_warning(f"Could not connect to Pyonet-API. Suggested: check if Pyonet-API is running and the PYONET_API_URL is correctly configured in the .env. Error: {str(ce)}")
return False
except Exception as e:
self.p3log.log_error(f"ACCESS_TOKEN is invalid. Suggested: generate a new access token from the Pyonet-Dashboard interface and add it to the .env file. {str(e)}")
return False
async def init_polling(self):
''' Retrieves devices from Pyonet-API and starts the polling loop
'''
try:
r = requests.get(f"{PYONET_API_URL}/poller/devices", headers={"Authorization": ACCESS_TOKEN})
r.raise_for_status()
self.devices = r.json()
self.p3log.log_success(f"Successfully retrieved {len(self.devices)} devices from Pyonet-API")
# Start polling loop
loop = asyncio.get_event_loop()
self.poll_task = loop.create_task(self.start_poll_loop())
except Exception as e:
self.p3log.log_error(f"Initialization failed. Error: {str(e)}")
return False
# Main Polling Loop
async def start_poll_loop(self):
''' Polls devices in a loop
'''
try:
for i in range(10):
await self.poll_devices()
await asyncio.sleep(1)
except asyncio.CancelledError as e:
self.p3log.log(f"Polling loop exited")
return False
async def poll_devices(self):
for device in self.devices:
print("Polling device: ", device["name"])
|
treytose/Pyonet-Poller
|
pyonet-poller/app/libraries/libpoller.py
|
libpoller.py
|
py
| 2,336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "app.tools.p3log.P3Log",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.PYONET_API_URL",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "app.ACCESS_TOKEN",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "requests.exceptions",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "app.PYONET_API_URL",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "app.ACCESS_TOKEN",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "asyncio.CancelledError",
"line_number": 50,
"usage_type": "attribute"
}
] |
19346820391
|
from pyparsing import FollowedBy
import tweepy
from app.config import TWITTER_API_KEY,TWITTER_API_KEY_SECRET,get_logger
class TwitterAPI:
def __init__(self,access_token,access_token_secret) -> None:
self.api_key = TWITTER_API_KEY
self.api_key_secret = TWITTER_API_KEY_SECRET
self.access_token = access_token
self.access_token_secret = access_token_secret
self.logger = get_logger()
auth = tweepy.OAuth1UserHandler(self.api_key,self.api_key_secret,self.access_token,self.access_token_secret)
self.api = tweepy.API(auth,parser=tweepy.parsers.JSONParser())
def get_user_profile(self,user_id,screen_name):
try:
profile = self.api.get_user(user_id=user_id,screen_name=screen_name,include_entities=1)
except Exception as e:
self.logger.error(f"Something went wrong while getting user profile: {str(e)}")
return None
return{
"user_id":profile['id_str'],
"name":profile['name'],
"description":profile['description'],
"screen_name":profile['screen_name'],
"profile_image_url":profile['profile_image_url']
}
def get_user_feed(self, page_num):
feed = []
try:
for page in tweepy.Cursor(self.api.home_timeline, tweet_mode="extended").pages(page_num):
feed = page
except Exception as e:
self.logger.error(f"Something went wrong while fetching Feed: {str(e)}")
return None
return feed
# geocode format --> '18.520430,73.856743,25km' (string)
def get_searched_tweets(self, query, page ,geocode):
searched_tweets=None
try:
for pageResult in tweepy.Cursor(self.api.search_tweets, q=query, geocode=geocode, tweet_mode="extended").pages(page):
searched_tweets = pageResult
except Exception as e:
self.logger.error(f"Something went wrong while searching Topic: {str(e)}")
return None
return searched_tweets
def upload_media(self,filename):
try:
media = self.api.media_upload(filename=filename)
except Exception as e:
self.logger.error(f"Something went wrong while uploading the File: {str(e)}")
return None
return media['media_id_string']
def create_tweet(self,text,media_ids):
try:
new_tweet = self.api.update_status(status =text,media_ids = media_ids)
except Exception as e:
self.logger.error(f"Something went wrong while creating new tweet: {str(e)}")
return None
return new_tweet
def reply_tweet(self, text, media_ids, tweet_id):
try:
reply = self.api.update_status(status = text, media_ids = media_ids, in_reply_to_status_id = tweet_id , auto_populate_reply_metadata=True)
except Exception as e:
self.logger.error(f"Something went wrong while replying to tweet: {str(e)}")
return None
return reply
def get_tweet(self, tweet_id):
try:
tweet = self.api.get_status(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while fetching tweet by id: {str(e)}")
return None
return tweet
def favorites_tweet(self, tweet_id):
try:
fav_tweet = self.api.create_favorite(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while liking the tweet: {str(e)}")
return None
return fav_tweet
def destory_favorite_tweet(self,tweet_id):
try:
destroy_tweet = self.api.destroy_favorite(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while disliking the tweet: {str(e)}")
return None
return destroy_tweet
def re_tweet(self, tweet_id):
try:
retweet = self.api.retweet(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while retweeting: {str(e)}")
return None
return retweet
def un_retweet(self, tweet_id):
try:
retweet = self.api.unretweet(id = tweet_id)
except Exception as e:
self.logger.error(f"Something went wrong while unretweeting the tweet: {str(e)}")
return None
return retweet
def get_user_posts(self, user_id, screen_name, page):
posts = []
try:
for page in tweepy.Cursor(self.api.user_timeline, user_id=user_id, screen_name=screen_name, tweet_mode="extended").pages(page):
posts = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching Posts: {str(e)}")
return None
return posts
def get_user_mentions_timeline(self, page):
mentions = []
try:
for page in tweepy.Cursor(self.api.mentions_timeline, tweet_mode="extended").pages(page):
mentions = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching Posts: {str(e)}")
return None
return mentions
def get_user_followers(self, user_id, screen_name, page):
followers = []
try:
for page in tweepy.Cursor(self.api.get_followers, user_id=user_id, screen_name=screen_name, tweet_mode="extended").pages(page):
followers = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching User Followers: {str(e)}")
return None
return followers
def get_user_following(self, user_id, screen_name, page):
following = []
try:
for page in tweepy.Cursor(self.api.get_friends, user_id=user_id, screen_name=screen_name, tweet_mode="extended").pages(page):
following = page
except Exception as e:
self.logger.error(
f"Something went wrong while fetching User Following: {str(e)}")
return None
return following
def get_user(self, user_id, screen_name):
try:
profile = self.api.get_user(
user_id=user_id, screen_name=screen_name)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching User Profile: {str(e)}")
return None
return profile
def follow_user(self, user_id):
try:
follow_tweet = self.api.create_friendship(
user_id=user_id, follow=True)
except Exception as e:
self.logger.error(
f"Something went wrong while following the user: {str(e)}")
return None
return follow_tweet
def unfollow_user(self, user_id):
try:
unfollow_tweet = self.api.destroy_friendship(
user_id=user_id)
except Exception as e:
self.logger.error(
f"Something went wrong while following the user: {str(e)}")
return None
return unfollow_tweet
def get_followers(self, user_id):
try:
followers = self.api.get_followers(user_id=user_id)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user followers: {str(e)}")
return None
return followers
def get_my_tweets(self, user_id):
try:
tweets = self.api.user_timeline(tweet_mode="extended")
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user tweets: {str(e)}")
return None
return tweets
def get_my_200_tweets(self, user_id):
try:
tweets = self.api.user_timeline(tweet_mode="extended",count=200)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user tweets: {str(e)}")
return None
return tweets
def get_mentions(self, user_id):
try:
timeline = self.api.mentions_timeline(
user_id=user_id, include_rts=False)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching user mentions timeline: {str(e)}")
return None
return timeline
def get_closest_trends(self,latitude,longitude):
try:
trends = self.api.closest_trends(latitude,longitude)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching closest trends: {str(e)}")
return None
return trends
def get_place_trends(self,woeId):
try:
trends = self.api.get_place_trends(woeId)
except Exception as e:
self.logger.error(
f"Something went wrong while fetching place trends: {str(e)}")
return None
return trends
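# Illustrative usage sketch (assumed, not part of the original module; the token
# values and screen name below are placeholders, not real credentials):
# client = TwitterAPI(access_token="USER_ACCESS_TOKEN", access_token_secret="USER_ACCESS_SECRET")
# profile = client.get_user_profile(user_id=None, screen_name="TwitterDev")
# nearby = client.get_searched_tweets("python", page=1, geocode="18.520430,73.856743,25km")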
|
Socialet/web-backend
|
app/services/api/twitterAPI.py
|
twitterAPI.py
|
py
| 9,191 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "app.config.TWITTER_API_KEY",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "app.config.TWITTER_API_KEY_SECRET",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.config.get_logger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuth1UserHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tweepy.parsers.JSONParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tweepy.parsers",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tweepy.Cursor",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tweepy.Cursor",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tweepy.Cursor",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tweepy.Cursor",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tweepy.Cursor",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tweepy.Cursor",
"line_number": 152,
"usage_type": "call"
}
] |
7725611508
|
import itertools
def det_4x4(matrix):
if len(matrix) != 4 or len(matrix[0]) != 4:
raise ValueError("A matriz não é 4x4")
indices = [0, 1, 2, 3]
permuta = itertools.permutations(indices)
det = 0
for perm in permuta:
sinal = 1
for i in range(4):
for j in range(i + 1, 4):
if perm[i] > perm[j]:
sinal *= -1
produto = 1
for i in range(4):
produto *= matrix[i][perm[i]]
det += sinal * produto
return det
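# Illustrative sanity check (assumed, not in the original script): under the
# Leibniz expansion the determinant of the 4x4 identity matrix is 1.
identity_matrix = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
assert det_4x4(identity_matrix) == 1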
# prompt the user to enter the elements of the 4x4 matrix
matrix = []
for i in range(4):
row = input(f"Digite os elementos da {i+1}ª linha separados por espaço: ")
matrix.append([int(x) for x in row.split()])
# compute the determinant using the Leibniz formula
det = det_4x4(matrix)
# print the result
print(f"The determinant of the matrix is {det}.")
|
AlexApLima/CalculoDeterminantesLeibniz
|
CalFormula.py
|
CalFormula.py
|
py
| 903 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.permutations",
"line_number": 7,
"usage_type": "call"
}
] |
31356519066
|
import pygame
import sys
# Game window size settings
screen_width = 800
screen_height = 600
# Button class definition
class Button:
def __init__(self, x, y, width, height, idle_image, hover_image):
self.rect = pygame.Rect(x, y, width, height)
self.idle_image = idle_image
self.hover_image = hover_image
self.image = idle_image
self.clicked = False
def draw(self, surface):
surface.blit(self.image, self.rect)
def handle_event(self, event):
if event.type == pygame.MOUSEMOTION:
if self.rect.collidepoint(event.pos):
self.image = self.hover_image
else:
self.image = self.idle_image
elif event.type == pygame.MOUSEBUTTONDOWN:
if self.rect.collidepoint(event.pos):
self.clicked = True
elif event.type == pygame.MOUSEBUTTONUP:
self.clicked = False
# Game class definition
class Game:
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Empire Builder")
self.clock = pygame.time.Clock()
# Load button images
self.button_idle_img = pygame.image.load("resources/images/slimes.png").convert_alpha()
self.button_hover_img = pygame.image.load("resources/images/slime_top.png").convert_alpha()
# Create the button
button_width = 150
button_height = 50
button_x = (screen_width - button_width) // 2
button_y = (screen_height - button_height) // 2
self.button = Button(button_x, button_y, button_width, button_height, self.button_idle_img, self.button_hover_img)
def run(self):
running = True
while running:
self.clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Handle button events
self.button.handle_event(event)
self.update()
self.render()
pygame.quit()
sys.exit()
def update(self):
# Update the game state
if self.button.clicked:
print("버튼이 클릭되었습니다!")
def render(self):
# Draw the game screen
self.screen.fill((0, 0, 0)) # black background
self.button.draw(self.screen) # draw the button
pygame.display.flip()
# Start the game
game = Game()
game.run()
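# Illustrative variation (assumed, not part of the original file): Button only needs
# two pygame Surfaces, so it can be exercised without the slime image assets:
# idle = pygame.Surface((150, 50)); idle.fill((80, 80, 200))
# hover = pygame.Surface((150, 50)); hover.fill((120, 120, 255))
# demo_button = Button(100, 100, 150, 50, idle, hover)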
|
frogress/prac_pygame
|
opentutorials/chatgpt.py
|
chatgpt.py
|
py
| 2,576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.Rect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.MOUSEMOTION",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONDOWN",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.MOUSEBUTTONUP",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 77,
"usage_type": "attribute"
}
] |
37841455471
|
import logging
logger = logging.getLogger()
logger.setLevel(level="DEBUG")
logging.Formatter
# Create a file handler
file_handle = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handle.setLevel(level="ERROR")
logger.addHandler(file_handle)
fmt = "%(name)s--->%(message)s--->%(asctime)s"
logging.basicConfig(level="DEBUG", format=fmt)
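# Note: because a handler was already attached to the root logger above, this
# basicConfig call does not add a console handler or apply fmt (basicConfig is a
# no-op when the root logger already has handlers, unless force=True on Python 3.8+).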
logging.debug("This is debug message")
logging.info("This is info message")
logging.warning("This is warning message")
logging.error("This is error")
logging.critical("This is critical message")
|
amespaces/pythonProject
|
common/创建文件处理器.py
|
创建文件处理器.py
|
py
| 552 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.FileHandler",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.critical",
"line_number": 19,
"usage_type": "call"
}
] |
1544268645
|
import itertools
def test(kenken_grid):
n = kenken_grid[0][0]
dom = []
for i in range(n):
dom.append(i + 1)
vars = []
for i in dom:
for j in dom:
vars.append('V{}{}'.format(i, j))
cons = []
for i in range(n):
vars_row = vars[(i * n): ((i + 1) * n)]
vars_col = []
for j in range(n):
vars_col.append(vars[i + (j * n)])
for var_pair in itertools.combinations(vars_row, 2):
cons.append("C-{},{})".format(var_pair[0],var_pair[1]))
for var_pair in itertools.combinations(vars_col, 2):
cons.append("C-{},{})".format(var_pair[0],var_pair[1]))
return cons
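# Illustrative usage (assumed, not in the original file): for a 2x2 grid the function
# returns one binary constraint label per pair of variables that share a row or a
# column, i.e. four labels covering (V11,V12), (V11,V21), (V21,V22) and (V12,V22).
# example_cons = test([[2]])
# len(example_cons) == 4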
|
monkeykingg/projects
|
3rd_year/csc384/A2/sol/test.py
|
test.py
|
py
| 691 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "itertools.combinations",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 27,
"usage_type": "call"
}
] |
70740769147
|
from __future__ import print_function
import torch
def diff_mse(x, y):
x_vec = x.view(1, -1).squeeze()
y_vec = y.view(1, -1).squeeze()
return torch.mean(torch.pow((x_vec - y_vec), 2)).item()
def ax_plus_b_vector(x, weight, bias):
return weight.mm(x).add(bias)
def ax_plus_b_scalar(x, weight, bias, h, w, number_of_channels, kernel_size):
result = 0
for c_in in range(number_of_channels):
for i in range(kernel_size):
for j in range(kernel_size):
result += x[c_in, h + i, w + j] * weight[c_in, i, j]
return result + bias
def convolved_image_size(size, kernel_size, padding, stride):
return ((size - kernel_size + 2 * padding) // stride) + 1
def im2col(img, kernel_size, device, stride=1, padding=0):
N_batch, C_in, img_size, _ = img.shape
out_size = convolved_image_size(img_size, kernel_size, padding, stride)
col = torch.zeros((kernel_size, kernel_size, N_batch, C_in, out_size, out_size))
margin = stride * out_size
for x in range(kernel_size):
for y in range(kernel_size):
col[x, y] = img[:, :, x:x + margin:stride, y:y + margin:stride]
return col.view(kernel_size*kernel_size, -1).to(device)
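# Illustrative usage sketch (assumed, not from the original file): a single 1-channel
# 4x4 image with a 3x3 kernel, stride 1 and no padding gives out_size 2, so the
# returned column matrix has shape (kernel_size**2, N*C*out_size*out_size) = (9, 4).
# example_img = torch.arange(16.0).view(1, 1, 4, 4)
# example_cols = im2col(example_img, kernel_size=3, device="cpu")  # torch.Size([9, 4])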
|
IvanProdaiko94/UCU-deep-learning-homework
|
layers/utilities.py
|
utilities.py
|
py
| 1,222 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.mean",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.pow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 32,
"usage_type": "call"
}
] |
72757283709
|
"""
Utilities I can't put anywhere else...
"""
import time
from math import copysign, gcd
from copy import copy
import sys
import numpy as np
import datetime
import functools
def true_if_answer_is_yes(prompt=""):
invalid = True
while invalid:
x = input(prompt)
x = x.lower()
if x[0] == "y":
return True
elif x[0] == "n":
return False
print("Need one of yes/no, Yes/No, y/n, Y/N")
def flatten_list(some_list):
flattened = [item for sublist in some_list for item in sublist]
return flattened
class not_required_flag(object):
def __repr__(self):
return "Not required"
NOT_REQUIRED = not_required_flag()
def group_dict_from_natural(dict_group):
"""
If we're passed a natural grouping dict (eg dict(bonds=["US10", "KR3", "DE10"], equity=["SP500"]))
Returns the dict optimised for algo eg dict(US10=["KR3", "DE10"], SP500=[], ..)
:param dict_group: dictionary of groupings
:type dict_group: dict
:returns: dict
>>> a=dict(bonds=["US10", "KR3", "DE10"], equity=["SP500"])
>>> group_dict_from_natural(a)['KR3']
['US10', 'DE10']
"""
if len(dict_group) == 0:
return dict()
all_names = sorted(
set(sum([dict_group[groupname] for groupname in dict_group.keys()], []))
)
def _return_without(name, group):
if name in group:
g2 = copy(group)
g2.remove(name)
return g2
else:
return None
def _return_group(name, dict_group):
ans = [
_return_without(name, dict_group[groupname])
for groupname in dict_group.keys()
]
ans = [x for x in ans if x is not None]
if len(ans) == 0:
return []
ans = ans[0]
return ans
gdict = dict([(name, _return_group(name, dict_group)) for name in all_names])
return gdict
def str2Bool(x):
if isinstance(x, bool):
return x
return x.lower() in ("t", "true")
def str_of_int(x):
"""
Returns the string of int of x, handling nan's or whatever
:param x: Name of python package
:type x: int or float
:returns: 1.0 or -1.0
>>> str_of_int(34)
'34'
>>> str_of_int(34.0)
'34'
>>> import numpy as np
>>> str_of_int(np.nan)
''
"""
if isinstance(x, int):
return str(x)
try:
return str(int(x))
except BaseException:
return ""
def sign(x):
"""
Return the sign of x (float or int)
:param x: Thing we want sign of
:type x: int, float
:returns: 1 or -1
>>> sign(3)
1.0
>>> sign(3.0)
1.0
>>> sign(-3)
-1.0
>>> sign(0)
1.0
"""
return copysign(1, x)
def value_or_npnan(x, return_value=None):
"""
If x is np.nan return return_value
else return x
:param x: np.nan or other
:return: x or return_value
>>> value_or_npnan(np.nan)
>>> value_or_npnan(np.nan, -1)
-1
>>> value_or_npnan("thing")
'thing'
>>> value_or_npnan(42)
42
"""
try:
if np.isnan(x):
return return_value
else:
pass
# Not a nan will return x
except BaseException:
# Not something that can be compared to a nan
pass
# Either wrong type, or not a nan
return x
def are_dicts_equal(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
if len(intersect_keys) != len(d1_keys):
return False
same = set(o for o in intersect_keys if d1[o] == d2[o])
if len(same) != len(d1_keys):
return False
return True
PROGRESS_EXP_FACTOR = 0.9
class progressBar(object):
"""
Example (not docstring as won't work)
import time
thing=progressBar(10000)
for i in range(10000):
# do something
time.sleep(0.001)
thing.iterate()
thing.finished()
"""
def __init__(
self,
range_to_iter,
suffix="Progress",
toolbar_width=80,
show_each_time=False,
show_timings=True,
):
self._start_time = time.time()
self.toolbar_width = toolbar_width
self.current_iter = 0
self.suffix = suffix
self.range_to_iter = range_to_iter
self.range_per_block = range_to_iter / float(toolbar_width)
self._how_many_blocks_displayed = -1 # will always display first time
self._show_each_time = show_each_time
self._show_timings = show_timings
self.display_bar()
def estimated_time_remaining(self):
total_iter = self.range_to_iter
iter_left = total_iter - self.current_iter
time_per_iter = self.current_estimate_of_times
if time_per_iter is None:
return 0
return iter_left * time_per_iter
def update_time_estimate(self):
## don't maintain a list per se, instead exponential
time_since_last_call = self.time_since_last_called()
current_estimate = self.current_estimate_of_times
if current_estimate is None:
## seed
current_estimate = time_since_last_call
else:
current_estimate = ((1 - PROGRESS_EXP_FACTOR) * time_since_last_call) + (
PROGRESS_EXP_FACTOR * current_estimate
)
self.current_estimate_of_times = current_estimate
@property
def current_estimate_of_times(self) -> float:
current_estimate = getattr(self, "_current_estimate_of_times", None)
return current_estimate
@current_estimate_of_times.setter
def current_estimate_of_times(self, current_estimate: float):
self._current_estimate_of_times = current_estimate
def time_since_last_called(self) -> float:
time_of_last_call = self.get_and_set_time_of_last_call()
current_time = self.current_time
return current_time - time_of_last_call
def get_and_set_time_of_last_call(self):
time_of_last_iter = copy(getattr(self, "_time_of_last_call", self.start_time))
self._time_of_last_call = self.current_time
return time_of_last_iter
def elapsed_time(self):
return self.current_time - self.start_time
@property
def start_time(self):
return self._start_time
@property
def current_time(self):
return time.time()
def iterate(self):
self.current_iter += 1
self.update_time_estimate()
if self.number_of_blocks_changed() or self._show_each_time:
self.display_bar()
if self.current_iter == self.range_to_iter:
self.finished()
def how_many_blocks_had(self):
return int(self.current_iter / self.range_per_block)
def how_many_blocks_left(self):
return int((self.range_to_iter - self.current_iter) / self.range_per_block)
def number_of_blocks_changed(self):
original_blocks = self._how_many_blocks_displayed
new_blocks = self.how_many_blocks_had()
if new_blocks > original_blocks:
return True
else:
return False
def display_bar(self):
percents = round(100.0 * self.current_iter / float(self.range_to_iter), 1)
if self._show_timings:
time_remaining = self.estimated_time_remaining()
time_elapsed = self.elapsed_time()
total_est_time = time_elapsed + time_remaining
time_str = "(%.1f/%.1f/%.1f secs left/elapsed/total)" % (
time_remaining,
time_elapsed,
total_est_time,
)
else:
time_str = ""
bar = "=" * self.how_many_blocks_had() + "-" * self.how_many_blocks_left()
progress_string = "\0\r [%s] %s%s %s %s\n" % (
bar,
percents,
"%",
self.suffix,
time_str,
)
sys.stdout.write(progress_string)
sys.stdout.flush()
self._how_many_blocks_displayed = self.how_many_blocks_had()
def finished(self):
self.display_bar()
sys.stdout.write("\n")
class quickTimer(object):
def __init__(self, seconds=60):
self._started = datetime.datetime.now()
self._time_limit = seconds
@property
def unfinished(self):
return not self.finished
@property
def finished(self):
time_now = datetime.datetime.now()
elapsed = time_now - self._started
if elapsed.seconds > self._time_limit:
return True
else:
return False
# avoids encoding problems with mongo
_none = ""
def none_to_object(x, object):
if x is _none:
return object
else:
return x
def object_to_none(x, object, y=_none):
if x is object:
return y
else:
return x
def get_unique_list(somelist):
uniquelist = []
for letter in somelist:
if letter not in uniquelist:
uniquelist.append(letter)
return uniquelist
MISSING_STR = -1
def transfer_object_attributes(named_tuple_object, original_object):
kwargs = dict(
[
(field_name, getattr(original_object, field_name))
for field_name in named_tuple_object._fields
]
)
new_object = named_tuple_object(**kwargs)
return new_object
def highest_common_factor_for_list(list_of_ints: list) -> int:
"""
:param list_of_ints:
:return: int
>>> highest_common_factor_for_list([2,3,4])
1
>>> highest_common_factor_for_list([2,6,4])
2
"""
return functools.reduce(gcd, list_of_ints)
def divide_list_of_ints_by_highest_common_factor(list_of_ints: list) -> list:
"""
:param list_of_ints:
:return: list
>>> divide_list_of_ints_by_highest_common_factor([1,2])
[1, 2]
>>> divide_list_of_ints_by_highest_common_factor([2,4])
[1, 2]
>>> divide_list_of_ints_by_highest_common_factor([1,2,3])
[1, 2, 3]
>>> divide_list_of_ints_by_highest_common_factor([1])
[1]
"""
gcd_value = highest_common_factor_for_list(list_of_ints)
new_list = [int(float(x) / gcd_value) for x in list_of_ints]
return new_list
def list_of_ints_with_highest_common_factor_positive_first(list_of_ints: list) -> list:
"""
Used to identify the unique version of a spread or fly contract
:param list_of_ints:
:return: list
>>> list_of_ints_with_highest_common_factor_positive_first([1])
[1]
>>> list_of_ints_with_highest_common_factor_positive_first([-1])
[1]
>>> list_of_ints_with_highest_common_factor_positive_first([1,-1])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([-1,1])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([-1,2])
[1, -2]
>>> list_of_ints_with_highest_common_factor_positive_first([-2,2])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([2,-2])
[1, -1]
>>> list_of_ints_with_highest_common_factor_positive_first([2,-4,2])
[1, -2, 1]
>>> list_of_ints_with_highest_common_factor_positive_first([-2,4,-2])
[1, -2, 1]
"""
new_list = divide_list_of_ints_by_highest_common_factor(list_of_ints)
multiply_sign = sign(new_list[0])
new_list = [int(x * multiply_sign) for x in new_list]
return new_list
def np_convert(val):
"""
Converts the passed numpy value to a native python type
>>> val = np.int64(1)
>>> type(val)
<class 'numpy.int64'>
>>> type(np_convert(val))
<class 'int'>
:param val:
:return: val as native type
"""
return val.item() if isinstance(val, np.generic) else val
if __name__ == "__main__":
import doctest
doctest.testmod()
|
ahalsall/pysystrade
|
syscore/genutils.py
|
genutils.py
|
py
| 11,815 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "copy.copy",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "math.copysign",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "copy.copy",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 333,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 334,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "functools.reduce",
"line_number": 415,
"usage_type": "call"
},
{
"api_name": "math.gcd",
"line_number": 415,
"usage_type": "argument"
},
{
"api_name": "numpy.generic",
"line_number": 485,
"usage_type": "attribute"
},
{
"api_name": "doctest.testmod",
"line_number": 491,
"usage_type": "call"
}
] |
28521323695
|
"""change precision of order amount
Revision ID: fb84527fc0b3
Revises: 5c6f2d25c2f0
Create Date: 2018-04-07 18:51:12.160012+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'fb84527fc0b3'
down_revision = '5c6f2d25c2f0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('order', 'amount',
existing_type=mysql.DECIMAL(precision=2, scale=0),
type_=sa.DECIMAL(precision=10),
existing_nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('order', 'amount',
existing_type=sa.DECIMAL(precision=10),
type_=mysql.DECIMAL(precision=2, scale=0),
existing_nullable=False)
# ### end Alembic commands ###
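# Illustrative usage (assumed, standard Alembic CLI): apply this revision with
# `alembic upgrade fb84527fc0b3` and revert it with `alembic downgrade 5c6f2d25c2f0`.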
|
harveyslash/backend-cleaned
|
beatest/migrations/versions/20180407185112_fb84527fc0b3_change_type_of_order_amount.py
|
20180407185112_fb84527fc0b3_change_type_of_order_amount.py
|
py
| 1,011 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "alembic.op.alter_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.dialects.mysql.DECIMAL",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.DECIMAL",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "alembic.op.alter_column",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.DECIMAL",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql.DECIMAL",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.mysql",
"line_number": 32,
"usage_type": "name"
}
] |
12940606913
|
import json
import logging
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
# Login config
logging.basicConfig(
filename='writer.log',
filemode='w',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO
)
# read records from sample_data and schema
raw_data = []
try:
with open('raw_data.txt','r') as txt:
for raw_record in txt.readlines():
record = raw_record.strip()
raw_data.append(record)
except Exception as err:
logging.error(err)
# Load schema
try:
schema = avro.schema.parse(open("schema.avsc").read())
except Exception as err:
logging.error(err)
raise
# Write records
try:
with open('encoded_data.avro', 'wb') as f:
writer = DataFileWriter(f, DatumWriter(), schema)
for row in raw_data:
try:
writer.append(json.loads(row))
except Exception as err:
logging.error(err)
logging.info(row.strip())
writer.close()
except Exception as err:
logging.error(err)
raise
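# Illustrative read-back sketch (assumed, mirrors the avro API used above):
# from avro.datafile import DataFileReader
# from avro.io import DatumReader
# with open('encoded_data.avro', 'rb') as f:
#     for record in DataFileReader(f, DatumReader()):
#         print(record)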
|
jocerfranquiz/avro_test
|
write_avro.py
|
write_avro.py
|
py
| 1,116 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "avro.schema.schema.parse",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "avro.schema.schema",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "avro.schema",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "avro.datafile.DataFileWriter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "avro.io.DatumWriter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 47,
"usage_type": "call"
}
] |
74473909946
|
from datetime import date
from django.test import Client
from django.test import TestCase
from django.urls import resolve
from .views import index, mhs_name, calculate_age
class Lab1UnitTest(TestCase):
def test_hello_name_is_exist(self):
response = Client().get('/lab-1/')
self.assertEqual(response.status_code, 200)
def test_using_index_func(self):
found = resolve('/lab-1/')
self.assertEqual(found.func, index)
def test_name_is_changed(self):
response = Client().get('/lab-1/')
self.assertTemplateUsed(response, 'lab_9/session/login.html')
session = self.client.session
session['user_login'] = 'user'
session['kode_identitas'] = '123'
session.save()
response = self.client.get('/lab-1/')
html_response = response.content.decode('utf8')
self.assertIn('<title>' + mhs_name + '</title>', html_response)
self.assertIn('<h1>Hello my name is ' + mhs_name + '</h1>', html_response)
self.assertFalse(len(mhs_name) == 0)
def test_calculate_age_is_correct(self):
self.assertEqual(0, calculate_age(date.today().year))
self.assertEqual(17, calculate_age(2000))
self.assertEqual(27, calculate_age(1990))
def test_index_contains_age(self):
response = Client().get('/lab-1/')
self.assertTemplateUsed(response, 'lab_9/session/login.html')
session = self.client.session
session['user_login'] = 'username'
session['kode_identitas'] = 'npm'
session.save()
response = self.client.get('/lab-1/')
html_response = response.content.decode('utf8')
self.assertRegex(html_response, r'<article>I am [0-9]\d+ years old</article>')
|
argaghulamahmad/ppw-lab-arga
|
lab_1/tests.py
|
tests.py
|
py
| 1,752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.test.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.resolve",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.index",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "django.test.Client",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "views.mhs_name",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "views.mhs_name",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "views.mhs_name",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "views.calculate_age",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "views.calculate_age",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "views.calculate_age",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.test.Client",
"line_number": 40,
"usage_type": "call"
}
] |