seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
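A minimal loading sketch for rows in this schema (assuming the records are materialized as a parquet file named `code_records.parquet`; the file name and storage format are assumptions, not stated by the dump):

import pandas as pd

df = pd.read_parquet("code_records.parquet")  # hypothetical file name
row = df.iloc[0]
print(row["repo_name"], row["file_name"], row["stars"])
for usage in row["api"]:  # each entry carries api_name, line_number, usage_type
    print(usage["api_name"], usage["line_number"], usage["usage_type"])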
13895478896
|
########################################################################################
# Run examples from our paper in RBEF
########################################################################################
import sys
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
import scipy
import scipy.signal
from scipy.integrate import simps
from joblib import Parallel, delayed
from ar_model import *
import pygc.pySpec
import pygc.parametric
import pygc.non_parametric
import pygc.granger
import plot_results
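# Usage (sketch): run as `python runRBEF.py <p>`, where <p> in 0-4 selects one of the examples below.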
p = int(sys.argv[-1])
if p == 0:
# Generates figure 3 from the paper
print('Generating Figure 3 from RBEF paper...')
N = 5000 # Number of observations
Fs = 200 # Sampling frequency
dt = 1.0 / Fs # Time resolution
C = 0.25 # Coupling parameter
Trials = 5000 # Number of trials
# Covariance matrix
cov = np.array([ [1.00, 0.00],
[0.00, 1.00] ])
f = pygc.pySpec.compute_freq(N, Fs)
S = np.zeros([2, 2, N//2 + 1], dtype=complex)
print('Generating AR model time series...')
Z = ar_model_dhamala(N=N, Trials = Trials, C=C, Fs=Fs, t_start=0, t_stop=None, cov=cov)
print('Estimating spectral matrix from ' + str(Trials) + ' trials...')
for i in range(Trials):
if i%500 == 0:
print('Trial = ' + str(i))
S[0,0] += pygc.pySpec.cxy(X=Z[0,i,:], Y=[], f=f, Fs=Fs) / Trials
S[0,1] += pygc.pySpec.cxy(X=Z[0,i,:], Y=Z[1,i,:], f=f, Fs=Fs) / Trials
S[1,0] += pygc.pySpec.cxy(X=Z[1,i,:], Y=Z[0,i,:], f=f, Fs=Fs) / Trials
S[1,1] += pygc.pySpec.cxy(X=Z[1,i,:], Y=[], f=f, Fs=Fs) / Trials
print('Computing Granger Causality...')
Snew, Hnew, Znew = pygc.non_parametric.wilson_factorization(S, f, Fs, Niterations=30)
Ix2y, Iy2x, Ixy = pygc.granger.granger_causality(S, Hnew, Znew)
print('Saving data...')
np.save('data/fig3.npy', {'f': f, 'S': S, 'H': Hnew, 'Z': Znew, 'Ix2y': Ix2y, 'Iy2x': Iy2x, 'Ixy': Ixy})
print('Plotting results...')
plot_results.fig3()
if p == 1:
# Generates figure 4 from the paper
N = 900 # Number of observations
Fs = 200 # Sampling frequency
dt = 1.0 / Fs # Time resolution
C = 0.25 # Coupling parameter
Trials = 5000 # Number of trials
cov = np.array([ [1.00, 0.00],
[0.00, 1.00] ])
f = pygc.pySpec.compute_freq(N, Fs)
S = np.zeros([2, 2, N, N//2 + 1], dtype=complex)
print('Generating AR model time series...')
Z = ar_model_dhamala(N=N, Trials = Trials, C=C, Fs=Fs, t_start=0, t_stop=2.25, cov=cov)
print('Estimating wavelet matrix from ' + str(Trials) + ' trials...')
for i in range(Trials):
if i%500 == 0:
print('Trial = ' + str(i))
Wx = pygc.pySpec.morlet(Z[0,i,:], f, Fs)
Wy = pygc.pySpec.morlet(Z[1,i,:], f, Fs)
S[0,0] += Wx*np.conj(Wx) / Trials
S[0,1] += Wx*np.conj(Wy) / Trials
S[1,0] += Wy*np.conj(Wx) / Trials
S[1,1] += Wy*np.conj(Wy) / Trials
# S = S[:,:,idx,:]
print('Computing Granger Causality...')
def save_granger(S, idx):
Snew, Hnew, Znew = pygc.non_parametric.wilson_factorization(S[:,:,idx,:], f, Fs, Niterations=30, verbose=False)
Ix2y, Iy2x, Ixy = pygc.granger.granger_causality(S[:,:,idx,:], Hnew, Znew)
np.save('data/fig4_'+str(idx)+'.npy', {'f': f, 'Ix2y': Ix2y, 'Iy2x': Iy2x, 'Ixy': Ixy})
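# Note: n_jobs=40 assumes a machine with many cores; reduce it for smaller machines.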
Parallel(n_jobs=40, backend='loky', max_nbytes=1e6)(delayed(save_granger)(S, idx) for idx in range(N))
print('Plotting results...')
plot_results.fig4()
if p == 2:
# Generates figure 7 and 8 from the paper
N = 5000 # Number of observations
Trials = 1000 # Number of trials
nvars = 5 # Number of variables
Fs = 2*np.pi
dt = 1.0 / Fs
f = pygc.pySpec.compute_freq(N, Fs)
print('Generating AR model time series...')
Y = ar_model_baccala(nvars, N, Trials)
print('Estimating spectral matrix from ' + str(Trials) + ' trials...')
S = np.zeros([nvars, nvars, N//2 + 1], dtype=complex)
for trial in range(Trials):
if (trial % 100 == 0):
print('Trial = ' + str(trial))
for i in range(nvars):
for j in range(nvars):
S[i,j] += pygc.pySpec.cxy(X=Y[i,:,trial], Y=Y[j,:,trial], f=f, Fs=Fs) / Trials
print('Estimating pairwise Granger causalities')
GC = np.zeros([nvars, nvars])
for i in range(nvars):
for j in range(nvars):
if i == j:
continue
else:
S_aux = np.array([[S[i,i], S[i,j]],[S[j,i], S[j,j]]])
_, H, Z = pygc.non_parametric.wilson_factorization(S_aux, f, Fs, Niterations=10, tol=1e-12, verbose=False)
Ix2y, Iy2x, _ = pygc.granger.granger_causality(S_aux, H, Z)
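# Geweke's time-domain GC is the spectral measure integrated over frequency, scaled by 1/(2*pi).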
GC[i,j] = simps(Ix2y, f) / (2*np.pi)
GC[j,i] = simps(Iy2x, f) / (2*np.pi)
print('Estimating conditional Granger causalities')
F = pygc.granger.conditional_granger_causality(S, f, Fs, Niterations = 10, verbose=False)
cGC = pygc.granger.conditional_spec_granger_causality(S, f, Fs, Niterations=100, tol=1e-12, verbose=False)
print('Saving data...')
np.save('data/fig_7_8.npy', {'f':f,'GC': GC, 'F': F, 'cGC': cGC})
print('Plotting results...')
plot_results.fig7_8()
if p == 3:
# Fits an AR model by solving YW equations as in appendix A of the paper.
Trials = 1000 # Number of trials
Fs = 200 # Sampling frequency
N = 1000 # Number of data points
X = np.zeros([1,N, Trials]) # Data matrix
tsim = N/Fs # Simulation time
# Coefficients of the ar model
c = [0.7, 0.2, -0.1, -0.3]
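# The lfilter call below simulates the AR(4) process
# x_t = 0.7*x_{t-1} + 0.2*x_{t-2} - 0.1*x_{t-3} - 0.3*x_{t-4} + w_t:
# its denominator -np.array([-1]+c) expands to [1, -0.7, -0.2, 0.1, 0.3], i.e. [1, -c1, ..., -c4].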
print('Generating AR model time series...')
for T in range(Trials):
X[0,:,T] = scipy.signal.lfilter([1], -np.array([-1]+c), np.random.randn(N))
print('Estimating AR model coefficients for ' + str(Trials) + ' trials')
for m in [2, 3, 4, 5, 6]:
print()
AR = np.zeros([1,1,m])
SIG = np.zeros([1,1])
for T in range(Trials):
aux1, aux2 = pygc.parametric.YuleWalker(X[:,:,T], m, maxlags=100)
AR += aux1.T/Trials
SIG += aux2.T/Trials
AR = np.round(AR, 2)
SIG = np.round(SIG, 2)
print('Using order = ' + str(m) + '. Original coefficients: ' + str(c) + '. Estimated coefficients: ' + str(AR[0][0]) + '. Noise variance: ' + str(SIG[0][0]))
if p == 4:
# Generates figure 3C from the paper, but using a parametric method
print('Generating Figure 3 from RBEF paper...')
N = 5000 # Number of observations
Fs = 200 # Sampling frequency
dt = 1.0 / Fs # Time resolution
C = 0.25 # Coupling parameter
Trials = 5000 # Number of trials
# Covariance matrix
cov = np.array([ [1.00, 0.00],
[0.00, 1.00] ])
print('Generating AR model time series...')
X = ar_model_dhamala(N=N, Trials = Trials, C=C, Fs=Fs, t_start=0, t_stop=None, cov=cov)
print('Estimating VAR coefficients using order m=2...')
m = 2
AR = np.zeros([m, 2,2])
SIG = np.zeros([2,2])
for T in range(Trials):
aux1, aux2 = pygc.parametric.YuleWalker(X[:,T,:], m, maxlags=100)
AR += aux1/Trials
SIG += aux2/Trials
print('Computing Granger Causality...')
f = pygc.pySpec.compute_freq(N, Fs)
H, S = pygc.parametric.compute_transfer_function(AR, SIG, f, Fs)
Ix2y, Iy2x, _ = pygc.granger.granger_causality(S, H, SIG)
plt.figure(figsize=(6,2))
plt.plot(f, Ix2y)
plt.plot(f, Iy2x)
plt.xlim([0, 100])
plt.ylim([-0.01, 1.2])
plt.ylabel('GC')
plt.xlabel('Frequency [Hz]')
plt.legend([r'$X_{1}\rightarrow X_{2}$', r'$X_{2}\rightarrow X_{1}$'])
plt.savefig('figures/fig9.pdf', dpi = 600)
plt.close()
|
ViniciusLima94/pyGC
|
runRBEF.py
|
runRBEF.py
|
py
| 7,302 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec.compute_freq",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec.cxy",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.pySpec.cxy",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.pySpec.cxy",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.pySpec.cxy",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.non_parametric.wilson_factorization",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.non_parametric",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.granger.granger_causality",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.granger",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "numpy.save",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "plot_results.fig3",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec.compute_freq",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec.morlet",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.pySpec.morlet",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "numpy.conj",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.conj",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "numpy.conj",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.conj",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.non_parametric.wilson_factorization",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.non_parametric",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.granger.granger_causality",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.granger",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "numpy.save",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "plot_results.fig4",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec.pySpec.compute_freq",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec.cxy",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.non_parametric.wilson_factorization",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.non_parametric",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.granger.granger_causality",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.granger",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "scipy.integrate.simps",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "scipy.integrate.simps",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec.granger.conditional_granger_causality",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.granger",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.granger.conditional_spec_granger_causality",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.granger",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "numpy.save",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "plot_results.fig7_8",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lfilter",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.parametric.YuleWalker",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.parametric",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "numpy.round",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.parametric.YuleWalker",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.parametric",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 209,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.pySpec.compute_freq",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.pySpec",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.parametric.compute_transfer_function",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.parametric",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "pygc.pySpec.granger.granger_causality",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "pygc.pySpec.granger",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "pygc.pySpec",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
}
] |
10731823816
|
import torch
from torch import nn
import torchvision.transforms as T
#######################################################################################
######################################## DRML ########################################
#######################################################################################
class RegionLayer(nn.Module):
def __init__(self, in_channels, grid=(8, 8)):
super(RegionLayer, self).__init__()
self.in_channels = in_channels
self.grid = grid
self.region_layers = dict()
for i in range(self.grid[0]):
for j in range(self.grid[1]):
module_name = 'region_conv_%d_%d' % (i, j)
self.region_layers[module_name] = nn.Sequential(
nn.BatchNorm2d(self.in_channels),
nn.ReLU(),
nn.Conv2d(in_channels=self.in_channels, out_channels=self.in_channels,
kernel_size=3, stride=1, padding=1)
)
self.add_module(name=module_name, module=self.region_layers[module_name])
def forward(self, x):
"""
:param x: (b, c, h, w)
:return: (b, c, h, w)
"""
batch_size, _, height, width = x.size()
input_row_list = torch.split(x, split_size_or_sections=height//(self.grid[0]-1), dim=2)
output_row_list = []
for i, row in enumerate(input_row_list):
input_grid_list_of_a_row = torch.split(row, split_size_or_sections=width//(self.grid[1]-1), dim=3)
output_grid_list_of_a_row = []
for j, grid in enumerate(input_grid_list_of_a_row):
module_name = 'region_conv_%d_%d' % (i, j)
# print(module_name)
# print(i,j)
grid = self.region_layers[module_name](grid.contiguous()) + grid
output_grid_list_of_a_row.append(grid)
output_row = torch.cat(output_grid_list_of_a_row, dim=3)
output_row_list.append(output_row)
output = torch.cat(output_row_list, dim=2)
return output
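# Example usage (hypothetical sketch): with grid=(8, 8), forward splits the map into chunks of
# height//(grid[0]-1) rows and width//(grid[1]-1) columns, so e.g.
# RegionLayer(in_channels=32)(torch.randn(2, 32, 160, 160)) produces exactly 8x8 regions
# and preserves the input shape.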
class DRML(nn.Module):
def __init__(self, class_number=12):
super(DRML, self).__init__()
print('Init DRML... pls god work...')
self.class_number = class_number
self.extractor = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=11, stride=1),
RegionLayer(in_channels=32, grid=(8, 8)),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.BatchNorm2d(num_features=32),
nn.Conv2d(in_channels=32, out_channels=16, kernel_size=8, stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=8,),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=6, stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=5, stride=1),
nn.ReLU(),
)
self.classifier = nn.Sequential(
nn.Linear(in_features=6400, out_features=4096),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(in_features=4096, out_features=2048),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(in_features=2048, out_features=class_number)
)
def forward(self, data):
"""
:param x: (b, c, h, w)
:return: (b, class_number)
"""
x = data[0]
batch_size = x.size(0)
output = self.extractor(x)
output = output.view(batch_size, -1)
output = self.classifier(output)
return output
#######################################################################################
####################################### AlexNet ######################################
#######################################################################################
class AlexNet(nn.Module):
def __init__(self, num_classes = 12, dropout = 0.5): #0.5
super().__init__()
print('INIT AU AlexNet')
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=False),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(2304, 4096), # assuming 144x144 input
nn.ReLU(inplace=False),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=False),
nn.Linear(4096, num_classes),
)
def forward(self, data):
x = data[0]
x = self.features(x)
x = torch.flatten(x, 1)
out = self.classifier(x)
return out
#######################################################################################
####################################### MTTSCAN ######################################
#######################################################################################
class Attention_mask(nn.Module):
def __init__(self):
super(Attention_mask, self).__init__()
def forward(self, x):
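# Normalize so the mask's spatial sum equals 0.5*h*w (i.e., a spatial mean of 0.5).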
xsum = torch.sum(x, dim=2, keepdim=True)
xsum = torch.sum(xsum, dim=3, keepdim=True)
xshape = tuple(x.size())
return x / xsum * xshape[2] * xshape[3] * 0.5
def get_config(self):
"""Keras-style leftover; torch.nn.Module has no get_config, so return an empty config."""
config = {}
return config
class TSM(nn.Module):
def __init__(self, n_segment=10, fold_div=3):
super(TSM, self).__init__()
self.n_segment = n_segment
self.fold_div = fold_div
def forward(self, x):
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
fold = c // self.fold_div
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
return out.view(nt, c, h, w)
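# Example usage (hypothetical sketch): TSM shifts one channel fold backward and one
# forward along the segment axis while preserving shape.
# x = torch.randn(20, 9, 4, 4)  # nt = n_batch * n_segment = 2 * 10
# assert TSM(n_segment=10)(x).shape == x.shape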
class MTTS_CAN_SMALL(nn.Module):
"""MTTS_CAN is the multi-task (respiration) version of TS-CAN"""
def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, frame_depth=20):
super(MTTS_CAN_SMALL, self).__init__()
print('init MTTS_CAN_SMALL')
self.in_channels = in_channels
self.kernel_size = kernel_size
self.dropout_rate1 = dropout_rate1
self.dropout_rate2 = dropout_rate2
self.pool_size = pool_size
self.nb_filters1 = nb_filters1
self.nb_filters2 = nb_filters2
self.nb_dense = nb_dense
# TSM layers
self.TSM_1 = TSM(n_segment=frame_depth)
self.TSM_2 = TSM(n_segment=frame_depth)
self.TSM_3 = TSM(n_segment=frame_depth)
self.TSM_4 = TSM(n_segment=frame_depth)
# Motion branch convs
self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Apperance branch convs
self.apperance_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Attention layers
self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0),bias=True)
self.attn_mask_1 = Attention_mask()
self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0),bias=True)
self.attn_mask_2 = Attention_mask()
# Dropout layers
self.dropout_4_y = nn.Dropout(self.dropout_rate2)
self.dropout_4_r = nn.Dropout(self.dropout_rate2)
# Dense layers
self.final_dense_1_y = nn.Linear(5184, self.nb_dense, bias=True)
self.final_dense_2_y = nn.Linear(self.nb_dense, 1, bias=True)
self.final_dense_1_r = nn.Linear(5184, self.nb_dense, bias=True)
self.final_dense_2_r = nn.Linear(self.nb_dense, 1, bias=True)
def forward(self, inputs, params=None):
big = inputs[0]
small = inputs[1]
raw_input = torch.zeros_like(small)
diff_input = small
transform = T.Resize((9,9))
for i in range(big.shape[0]):
# iterate through batch
raw_input[i,:,:,:] = transform(big[i,:,:,:])
diff_input = self.TSM_1(diff_input)
d1 = torch.tanh(self.motion_conv1(diff_input))
d1 = self.TSM_2(d1)
d2 = torch.tanh(self.motion_conv2(d1))
r1 = torch.tanh(self.apperance_conv1(raw_input))
r2 = torch.tanh(self.apperance_conv2(r1))
g1 = torch.sigmoid(self.apperance_att_conv1(r2))
g1 = self.attn_mask_1(g1)
gated1 = d2 * g1
# d3 = self.avg_pooling_1(gated1)
# d4 = self.dropout_1(d3)
# r3 = self.avg_pooling_2(r2)
# r4 = self.dropout_2(r3)
d4 = self.TSM_3(gated1)
d5 = torch.tanh(self.motion_conv3(d4))
d5 = self.TSM_4(d5)
d6 = torch.tanh(self.motion_conv4(d5))
r5 = torch.tanh(self.apperance_conv3(r2))
r6 = torch.tanh(self.apperance_conv4(r5))
g2 = torch.sigmoid(self.apperance_att_conv2(r6))
g2 = self.attn_mask_2(g2)
gated2 = d6 * g2
# d7 = self.avg_pooling_3(gated2)
# d8 = self.dropout_3(d7)
d9 = gated2.view(gated2.size(0), -1)
d10 = torch.tanh(self.final_dense_1_y(d9))
d11 = self.dropout_4_y(d10)
out_y = self.final_dense_2_y(d11)
d10 = torch.tanh(self.final_dense_1_r(d9))
d11 = self.dropout_4_r(d10)
out_r = self.final_dense_2_r(d11)
return out_y, out_r
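# Example usage (hypothetical sketch; the 9x9 'small' frames match the 5184 = 64*9*9
# inputs expected by final_dense_1_y / final_dense_1_r):
# model = MTTS_CAN_SMALL(frame_depth=20)
# out_y, out_r = model([torch.randn(20, 3, 72, 72), torch.randn(20, 3, 9, 9)])
# out_y and out_r each have shape (20, 1)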
#######################################################################################
####################################### DEEPPHYS ######################################
#######################################################################################
class DeepPhys(nn.Module):
def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, out_size=1, img_size=36):
"""Definition of DeepPhys.
Args:
in_channels: the number of input channel. Default: 3
img_size: height/width of each frame. Default: 36.
Returns:
DeepPhys model.
"""
super(DeepPhys, self).__init__()
print("INIT DEEPPHYS")
self.in_channels = in_channels
self.kernel_size = kernel_size
self.dropout_rate1 = dropout_rate1
self.dropout_rate2 = dropout_rate2
self.pool_size = pool_size
self.nb_filters1 = nb_filters1
self.nb_filters2 = nb_filters2
self.nb_dense = nb_dense
self.out_size = out_size
# Motion branch convs
self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Apperance branch convs
self.apperance_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
self.apperance_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1), bias=True)
# Attention layers
self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0), bias=True)
self.attn_mask_1 = Attention_mask()
self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0), bias=True)
self.attn_mask_2 = Attention_mask()
# Dropout layers
self.dropout_4 = nn.Dropout(self.dropout_rate2)
# Dense layers
self.final_dense_1 = nn.Linear(5184, self.nb_dense, bias=True)
self.final_dense_2 = nn.Linear(self.nb_dense, self.out_size, bias=True)
def forward(self, inputs, params=None):
big = inputs[0]
small = inputs[1]
raw_input = torch.zeros_like(small)
diff_input = small
transform = T.Resize((9,9))
for i in range(big.shape[0]):
raw_input[i,:,:,:] = transform(big[i,:,:,:])
d1 = torch.tanh(self.motion_conv1(diff_input))
d2 = torch.tanh(self.motion_conv2(d1))
r1 = torch.tanh(self.apperance_conv1(raw_input))
r2 = torch.tanh(self.apperance_conv2(r1))
g1 = torch.sigmoid(self.apperance_att_conv1(r2))
g1 = self.attn_mask_1(g1)
gated1 = d2 * g1
d5 = torch.tanh(self.motion_conv3(gated1))
d6 = torch.tanh(self.motion_conv4(d5))
r5 = torch.tanh(self.apperance_conv3(r2))
r6 = torch.tanh(self.apperance_conv4(r5))
g2 = torch.sigmoid(self.apperance_att_conv2(r6))
g2 = self.attn_mask_2(g2)
gated2 = d6 * g2
d9 = gated2.view(gated2.size(0), -1)
d10 = torch.tanh(self.final_dense_1(d9))
d11 = self.dropout_4(d10)
out = self.final_dense_2(d11)
return out
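# Example usage (hypothetical sketch; as above, 9x9 'small' frames match the 5184-wide
# final_dense_1): out = DeepPhys()([torch.randn(4, 3, 72, 72), torch.randn(4, 3, 9, 9)])
# out has shape (4, 1)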
|
girishvn/BigSmall
|
code/neural_methods/model/literature_models.py
|
literature_models.py
|
py
| 15,138 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.split",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.split",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "torch.flatten",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "torch.sum",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 228,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "torch.tanh",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 341,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 346,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 351,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 352,
"usage_type": "name"
},
{
"api_name": "torch.zeros_like",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "torch.tanh",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 388,
"usage_type": "call"
}
] |
13085423175
|
import json
# this will create a tweet, with possibilities of adding media and replying to other tweets
def create_tweet(tas, message, media_ids=None, reply_ids=None):
    payload = {"status": message}
    if media_ids is not None:
        payload["media_ids"] = media_ids
    if reply_ids is not None:
        payload["in_reply_to_status_id"] = reply_ids
    r = tas.post("https://api.twitter.com/1.1/statuses/update.json", data=payload)
    resp = json.loads(r.text)
    if r.status_code == 200:
        tweet_id = resp["id"]
        return 0, (tweet_id,)
    return 1, (r.text,)
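# Example usage (hypothetical sketch): `tas` is assumed to be an authenticated session
# object exposing post/delete, e.g. a requests_oauthlib.OAuth1Session built from the app's keys.
# status, payload = create_tweet(tas, "hello world")
# if status == 0:
#     delete_tweet(tas, payload[0])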
# this will delete a tweet based on a given tweet-id; mirrors create_tweet's return convention
def delete_tweet(tas, tweet_id):
    r = tas.delete(f"https://api.twitter.com/2/tweets/{tweet_id}")
    resp = json.loads(r.text)
    if r.status_code == 200:
        return 0, (resp,)
    return 1, (r.text,)
|
filming/Twitter
|
src/Twitter/tweet/tweet.py
|
tweet.py
|
py
| 723 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
}
] |
15018029353
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 5 16:56:55 2022
@author: josephbriggs
"""
import pathlib
import argparse
import cv2
def main():
    '''
    Converts files to greyscale.
    '''
    parser = argparse.ArgumentParser(description='Convert files to greyscale.')
    parser.add_argument('--input_path', "-i", type=str,
                        help='path to the image or directory of images. \
                        If converting a directory, use *')
    parser.add_argument('--output_path', "-o", type=str,
                        help='output path where images will be saved.')
    parser.add_argument('--res', "-r", type=int,
                        help='downscale factor.')
    args = parser.parse_args()
    pathlib.Path(args.output_path).mkdir(parents=True, exist_ok=True)
    # files = pathlib.Path(args.input_path).glob(r'/*.png|')
    file_extensions = ['png', 'jpeg', 'jpg']
    files = []
    for file_extension in file_extensions:
        files += pathlib.Path(args.input_path).glob(fr'*.{file_extension}')
    for file in files:
        file_name = file.name
        image = cv2.imread(str(file))
        image_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        file_name_to_save = args.output_path + "/" + file_name
        print(file_name_to_save)
        cv2.imwrite(file_name_to_save, image_gs)
    print('converted files to greyscale')
if __name__ == "__main__":
    main()
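# Example invocation (sketch): python imgs_to_gs.py -i photos -o photos_gs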
|
jhb123/enhance_greyscale
|
imgs_to_gs.py
|
imgs_to_gs.py
|
py
| 1,430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 44,
"usage_type": "call"
}
] |
12598627326
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
    """
    Runs a data quality check by executing a test SQL query.

    Parameters:
        redshift_conn_id: Redshift Connection ID
        test_sql: SQL query to run on Redshift for data validation
        expected_result: Expected result to match against the test result
    """
    ui_color = '#89DA59'

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 test_sql="",
                 expected_result="",
                 *args, **kwargs):
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.test_sql = test_sql
        self.expected_result = expected_result

    def execute(self, context):
        self.log.info("Start data validation...")
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        self.log.info("Got credentials.")
        records = redshift_hook.get_records(self.test_sql)
        if records[0][0] != self.expected_result:
            raise ValueError(f"Data quality check failed. {records[0][0]} does not equal {self.expected_result}")
        else:
            self.log.info("Data quality check passed!!!")
|
ljia-ch/airflow_data_pipeline_project
|
plugins/operators/data_quality.py
|
data_quality.py
|
py
| 1,362 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "airflow.models.BaseOperator",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "airflow.utils.decorators.apply_defaults",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "airflow.hooks.postgres_hook.PostgresHook",
"line_number": 32,
"usage_type": "call"
}
] |
21368565276
|
import numpy as np
import time
import matplotlib.pyplot as plt
a=np.loadtxt('meas2/magnitude_0to40.0mA_freq_sweep.csv', delimiter=',')
c=np.loadtxt('meas2/phase_0to40.0mA_freq_sweep.csv', delimiter=',')
b=np.loadtxt('meas2/sweep_feq.csv', delimiter=',')
cstart=0 #start current
cstop=40E-3 # stop current
cstep=5E-3 # current step
csteps=int((cstop-cstart)/cstep)
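# number of current steps that will be plotted (covers cstart up to cstop - cstep)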
fig, (ax0,ax1)=plt.subplots(2,1, sharex=True)
for i in range(csteps):
    current = cstart + i*cstep
    ax0.plot(b, a[:,i], label="{0:d}mA".format(int(current*1000)))
    ax1.plot(b, c[:,i], label="{0:d}mA".format(int(current*1000)))
ax1.set_xlabel("Larmor frequency in kHz")
ax1.axhline(y=0, color='r', ls='--')
plt.legend(prop={'size':6})
#plt.savefig('meas2/phase_amplitude.png')
plt.show()
|
physikier/magnetometer
|
src/plot.py
|
plot.py
|
py
| 775 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.loadtxt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
}
] |
42656181560
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import argparse
import logging
from tarfile import TarFile
from thirdparty.dagflow import ParallelTask, Task, DAG, do_dag
from ontbc.common import mkdir, touch, read_tsv
from ontbc.parser import add_barcode_parser
from ontbc.config import PORECHOP_BIN, QUEUE
from ontbc import __file__, __version__, __email__, __author__
LOG = logging.getLogger(__name__)
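# Helper below: list the on-disk paths that a tar archive's members would occupy next to it.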
def read_tar(file):
a = os.path.dirname(file)
return [os.path.join(a, i) for i in TarFile(file).getnames()]
def scan_cell(cell):
fastqs = []
summarys = []
fast5s = []
for root, dirs, files in os.walk(cell, followlinks=True, topdown=False):
for name in files:
path = os.path.join(root, name)
if name.endswith(".fastq"):
fastqs.append(path)
elif name.endswith(".txt"):
summarys.append(path)
elif name.endswith(".fast5"):
fast5s.append(path)
elif name.endswith(".tar"):
fast5s += read_tar(path)
else:
pass
return fastqs, summarys, fast5s
def create_porechop_tasks(cell, barcodes, job_type, work_dir, out_dir):
LOG.info("find fastq, summary and fast5 files in %r" % cell)
fastq_fofn = os.path.join(work_dir, "fastq.fofn")
summary_fofn = os.path.join(work_dir, "summary.fofn")
fast5_fofn = os.path.join(work_dir, "fast5.fofn")
find_done = os.path.join(work_dir, "find_done")
if not os.path.exists(find_done):
fastqs, summarys, fast5s = scan_cell(cell)
for i, j in zip([fastq_fofn, summary_fofn, fast5_fofn], [fastqs, summarys, fast5s]):
with open(i, "w") as fh:
fh.write("%s\n" % "\n".join(j))
del fastqs, summarys, fast5s
touch(find_done)
fastqs = [i[0] for i in read_tsv(fastq_fofn)]
summarys = [i[0] for i in read_tsv(summary_fofn)]
fast5s = [i[0] for i in read_tsv(fast5_fofn)]
LOG.info("%s fastq, %s summary and %s fast5 files found" % (len(fastqs), len(summarys), len(fast5s)))
del summarys, fast5s
if job_type == "local":
_option = ""
else:
_option = "-q %s" % ",".join(QUEUE)
tasks = ParallelTask(
id="bc",
work_dir="%s/{id}" % work_dir,
type=job_type,
option=_option,
script="""
{ontbc}/ontbc.py clean {{fastq}} > clean.fastq
{porechop}/porechop-runner.py -i clean.fastq -b . -t 1 --verbosity 2 --no_split > porechop.log
rm -rf clean.fastq
""".format(
porechop=PORECHOP_BIN,
ontbc=os.path.join(os.path.dirname(__file__), "..")
),
fastq=fastqs,
)
summary = os.path.join(work_dir, "all.summary.txt")
join_summary = Task(
id="join_summary",
work_dir=work_dir,
type=job_type,
script="""
less {summary} | xargs cat - > all.summary.txt
""".format(
summary=summary_fofn
),
)
join_tasks = ParallelTask(
id="join",
work_dir=work_dir,
type=job_type,
script="""
mkdir -p {out}/{{barcode}}
if [ ! -e {{barcode}}_cat_done ]; then
cat */{{barcode}}.fastq > {out}/{{barcode}}/{{barcode}}.fastq
touch {{barcode}}_cat_done
fi
rm -rf */{{barcode}}.fastq
cd {out}/{{barcode}}
{ontbc}/ontbc.py filter --fastq {{barcode}}.fastq --summary {summary} --fast5 {fast5} \\
--min_score -100 --min_length 0 --out {{barcode}}
rm {{barcode}}.filtered.fastq
mv {{barcode}}.filtered.summary.txt {{barcode}}.summary.txt
""".format(
summary=summary,
ontbc=os.path.join(os.path.dirname(__file__), ".."),
fast5=fast5_fofn,
out=out_dir
),
barcode=barcodes
)
for i in join_tasks:
i.set_upstream(*tasks)
i.set_upstream(join_summary)
return tasks, join_tasks, join_summary
def run_porechop(cell, barcodes, job_type, threads, work_dir, out_dir):
assert os.path.isdir(cell), "%r not exist" % cell
out_dir = mkdir(out_dir)
work_dir = mkdir(work_dir)
tasks, join_tasks, join_summary = create_porechop_tasks(
cell=cell,
barcodes=barcodes,
job_type=job_type,
work_dir=work_dir,
out_dir=out_dir
)
dag = DAG("porechop")
dag.add_task(*tasks)
dag.add_task(join_summary)
dag.add_task(*join_tasks)
do_dag(dag, concurrent_tasks=threads, refresh_time=30)
def barcode(args):
run_porechop(
cell=args.cell,
barcodes=args.barcode,
job_type=args.job_type,
threads=args.threads,
work_dir=args.work_dir,
out_dir=args.out_dir
)
def main():
logging.basicConfig(
stream=sys.stderr,
level=logging.INFO,
format="[%(levelname)s] %(message)s"
)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
version: %s
contact: %s <%s>\
""" % (__version__, " ".join(__author__), __email__))
parser = add_barcode_parser(parser)
args = parser.parse_args()
barcode(args)
if __name__ == "__main__":
main()
|
FlyPythons/ontbc
|
ontbc/barcode.py
|
barcode.py
|
py
| 5,174 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.path.dirname",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "tarfile.TarFile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.walk",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "os.path.path.exists",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "ontbc.common.touch",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "ontbc.common.read_tsv",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "ontbc.common.read_tsv",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "ontbc.common.read_tsv",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "ontbc.config.QUEUE",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "thirdparty.dagflow.ParallelTask",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "ontbc.config.PORECHOP_BIN",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "os.path.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "ontbc.__file__",
"line_number": 93,
"usage_type": "argument"
},
{
"api_name": "os.path.path.join",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "thirdparty.dagflow.Task",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "thirdparty.dagflow.ParallelTask",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.path.join",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "ontbc.__file__",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "os.path.path.isdir",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "ontbc.common.mkdir",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "ontbc.common.mkdir",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "thirdparty.dagflow.DAG",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "thirdparty.dagflow.do_dag",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "argparse.RawDescriptionHelpFormatter",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "ontbc.__version__",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "ontbc.__author__",
"line_number": 196,
"usage_type": "argument"
},
{
"api_name": "ontbc.__email__",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "ontbc.parser.add_barcode_parser",
"line_number": 198,
"usage_type": "call"
}
] |
39865467891
|
from IPython import get_ipython
def type_of_script():
"""
Detects and returns the type of python kernel
:return: string 'jupyter' or 'ipython' or 'terminal'
"""
    try:
        ipy_str = str(type(get_ipython()))
        if 'zmqshell' in ipy_str:
            return 'jupyter'
        if 'terminal' in ipy_str:
            return 'ipython'
    except Exception:
        pass
    return 'terminal'
if type_of_script() == 'jupyter':
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
import matplotlib.pyplot as plt # type: module
import matplotlib.ticker as ticker
from matplotlib import colormaps
from matplotlib.colors import Normalize
import matplotlib.gridspec as gridspec
import numpy as np
import os, glob
import time
import warnings
from rur.fortranfile import FortranFile
from rur import uri, uhmi, painter, drawer
from rur.sci.photometry import measure_luminosity
from rur.sci.geometry import get_angles, euler_angle
from rur.utool import rotate_data
from scipy.ndimage import gaussian_filter
uri.timer.verbose=1
# from rur.sci.kinematics import f_getpot
from icl_IO import mode2repo, pklsave, pklload
from icl_tool import *
from icl_numba import large_isin, large_isind, isin
from icl_draw import drawsnap, add_scalebar, addtext, MakeSub_nolabel, label_to_in, fancy_axis, circle
import argparse, subprocess
from importlib import reload
import cmasher as cmr
from copy import deepcopy
from multiprocessing import Pool, shared_memory
mode = 'nh'
iout = 1026
repo, rurmode, dp = mode2repo(mode)
snap = uri.RamsesSnapshot(repo, iout, mode=rurmode)
snaps = uri.TimeSeries(snap)
snaps.read_iout_avail()
nout = snaps.iout_avail['iout']
gals = uhmi.HaloMaker.load(snap, galaxy=True, double_precision=dp)
hals = uhmi.HaloMaker.load(snap, galaxy=False, double_precision=dp)
database = f"/home/jeon/MissingSat/database"
from common_func import *
tree = pklload(f"{database}/02_main_progenitors.pickle")
if(os.path.exists(f"{database}/halo_dict.pickle")):
halos = pklload(f"{database}/halo_dict.pickle")
else:
halos = {'catalog':{}, 'index':{}}
uri.timer.verbose=0
for iout in tqdm(np.unique(tree['timestep'])):
isnap = snaps.get_snap(iout)
ihals = uhmi.HaloMaker.load(isnap, galaxy=False, double_precision=dp)
    indices = np.zeros(len(ihals), dtype=int)
    iids = tree[tree['timestep'] == iout]['id']
    ihals = ihals[iids-1]
    indices[iids-1] = np.arange(len(iids))
    halos['catalog'][iout] = ihals
    halos['index'][iout] = indices
pklsave(halos, f"{database}/halo_dict.pickle")
def _ibox(h, factor=1):
return np.array([
[h['x']-factor*h['r'], h['x']+factor*h['r']],
[h['y']-factor*h['r'], h['y']+factor*h['r']],
[h['z']-factor*h['r'], h['z']+factor*h['r']]
])
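# _ibox returns a (3, 2) min/max bounding box around a halo; factor=1.1 below
# pads the radius by 10% so cells on the halo boundary are not missed.
# Example (hypothetical halo record):
#   box = _ibox({'x': 0.5, 'y': 0.5, 'z': 0.5, 'r': 0.01}, factor=1.1)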
uri.timer.verbose=0
for iout in np.unique(tree['timestep'])[::-1]:
if(os.path.exists(f"{database}/main_prog/cpulist/cpulist_{iout:05d}.pickle")): continue
cpudict = {}
targets = halos['catalog'][iout]
isnap = snaps.get_snap(iout)
cpulists = []
with Pool(32) as pool:
async_result = [
pool.apply_async(
uri.get_cpulist,
(_ibox(h,factor=1.1), None, isnap.levelmax, isnap.bound_key, isnap.ndim, 5, isnap.ncpu)
) for h in targets
]
iterobj = tqdm(async_result, total=len(targets), desc=f"iout={iout:05d}")
for r in iterobj:
cpulists.append(r.get())
cpulists = np.unique( np.concatenate(cpulists) )
cpudict['all'] = cpulists
pklsave(cpudict, f"{database}/main_prog/cpulist/cpulist_{iout:05d}.pickle")
print(f"`{database}/main_prog/cpulist/cpulist_{iout:05d}.pickle` save done")
isnap.clear()
|
syj3514/MissingSat
|
befo231205/05b_get_involved_cpu.py
|
05b_get_involved_cpu.py
|
py
| 3,847 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "IPython.get_ipython",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rur.uri.timer",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "rur.uri",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "icl_IO.mode2repo",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "rur.uri.RamsesSnapshot",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "rur.uri",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "rur.uri.TimeSeries",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "rur.uri",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "rur.uhmi.HaloMaker.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "rur.uhmi.HaloMaker",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "rur.uhmi",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "rur.uhmi.HaloMaker.load",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "rur.uhmi.HaloMaker",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "rur.uhmi",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "icl_IO.pklload",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "icl_IO.pklload",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "rur.uri.timer",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "rur.uri",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "rur.uhmi.HaloMaker.load",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "rur.uhmi.HaloMaker",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "rur.uhmi",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "icl_IO.pklsave",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "rur.uri.timer",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "rur.uri",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "numpy.unique",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "rur.uri.get_cpulist",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "rur.uri",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "icl_IO.pklsave",
"line_number": 117,
"usage_type": "call"
}
] |
12190600050
|
#
# db.py
#
import os
import sqlite3
import time
import datetime
from flask import g
import db_init
def test_db_conn():
    """ Check if db connection can open, create and init db if it doesn't already exist """
    dbname = os.environ.get("SEVENS_DB_NAME")
    assert dbname is not None
    # Remove any stale db file so the connection test starts clean
    try:
        os.remove(dbname)
    except OSError:
        pass
    print("********dbname: "+dbname)
    # If db file doesn't exist call the initialize module to create and init
    if not os.path.isfile(dbname):
        print("!!!!!!!!!dbname: "+dbname)
        db_init.init(dbname)
def get_db():
dbname = os.environ.get("SEVENS_DB_NAME")
assert dbname is not None
if not hasattr(g,'sqlite_db'):
g.sqlite_db = sqlite3.connect(dbname)
return g.sqlite_db
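# get_db() lazily opens one sqlite connection per request context and caches it
# on flask.g, so repeated calls within a request reuse the same handle;
# close_db() below releases it at the end of the request.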
def close_db():
if hasattr(g,'sqlite_db'):
g.sqlite_db.close()
# CREATE TABLE players (id INTEGER PRIMARY KEY AUTOINCREMENT, points INTEGER, name STRING);
def players_row_to_dict(row):
players = {}
players['id'] = row[0]
players['points'] = row[1]
players['name'] = row[2]
return players
#CREATE TABLE hands (player_id INTEGER FOREIGN KEY, clubs STRING, hearts STRING, diamonds STRING, spades STRING);
def hands_row_to_dict(row):
hands = {}
hands['player_id'] = row[0]
hands['clubs'] = str(row[1])
hands['hearts'] = str(row[2])
hands['diamonds'] = str(row[3])
hands['spades'] = str(row[4])
return hands
# CREATE TABLE board (cur_player_id INTEGER FOREIGN KEY, clubs STRING, hearts STRING, diamonds STRING, spades STRING);
def board_row_to_dict(row):
board = {}
board['cur_player_id'] = row[0]
board['clubs'] = str(row[1])
board['hearts'] = str(row[2])
board['diamonds'] = str(row[3])
board['spades'] = str(row[4])
return board
def get_game_state():
conn = get_db()
curs = conn.cursor()
    rows = curs.execute("SELECT * FROM board").fetchall()
board = []
for row in rows:
b = board_row_to_dict(row)
board.append(b)
return board
def get_games():
conn = get_db()
curs = conn.cursor()
    rows = curs.execute("SELECT * FROM games ORDER BY date").fetchall()
games = []
for row in rows:
game = game_row_to_dict(row)
games.append(game)
return games
'''
def update_game(gameid,score,lines,user):
conn = get_db()
curs = conn.cursor()
curs.execute("UPDATE games SET score=?, lines=?, user=?, haveResult=? WHERE id=?;",
(score, lines, user, True, gameid))
conn.commit()
res = curs.execute("SELECT * FROM games WHERE id=?;",(gameid,)).fetchall()
if len(res) != 0:
return game_row_to_dict(res[0])
else:
return None
def add_access_log(game_id, func, method, auth, ip, user_agent):
""" Add access log to global access_log list """
conn = get_db()
curs = conn.cursor()
curs.execute("INSERT INTO accesslog (id, function, method, date, ipaddress, useragent, user) VALUES (?,?,?,?,?,?,?);", (game_id, func, method, time.time(),ip, user_agent, auth))
conn.commit()
def get_access_logs():
conn = get_db()
curs = conn.cursor()
rows = curs.execute ("SELECT * FROM accesslog ORDER BY date").fetchall()
access_log = []
for row in rows:
access = accesslog_row_to_dict(row)
access_log.append(access)
return access_log
def get_rng_seed():
""" Generate rng seed """
return 0xDEADBEEF
'''
|
tolkamps1/sevens7
|
db.py
|
db.py
|
py
| 3,441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "db_init.init",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "flask.g.sqlite_db",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.g.sqlite_db",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.g",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "flask.g.sqlite_db.close",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.g.sqlite_db",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 38,
"usage_type": "name"
}
] |
11941164377
|
#!/usr/bin/env python
#
# fast_mr ->
#
# Fast molecular replacement in the spirit of fast_dp, starting from coordinate
# files and using brute force (and educated guesses) to get everything going.
#
# fast_mr - main program.
import os
import sys
import time
import shutil
import math
import traceback
from multiprocessing import Pool
from iotbx import mtz
from iotbx import pdb
from libtbx.phil import parse
from cctbx.sgtbx import space_group, space_group_symbols
from iotbx.scalepack import merge as merge_scalepack
from libtbx import introspection
if 'FAST_EP_ROOT' not in os.environ:
raise RuntimeError('FAST_EP_ROOT not set')
fast_ep_lib = os.path.join(os.environ['FAST_EP_ROOT'], 'lib')
if not fast_ep_lib in sys.path:
sys.path.append(fast_ep_lib)
from xml_output import write_ispyb_xml
from generate_possible_spacegroups import generate_chiral_spacegroups, \
spacegroup_enantiomorph, spacegroup_full, sanitize_spacegroup
from run_job import run_job, run_job_cluster, is_cluster_job_finished
from fast_mr_phaser import run_phaser_cluster
from parse_pdb import pdb_file_nres
class logger:
def __init__(self):
self._fout = open('fast_mr.log', 'w')
return
    def __del__(self):
        self._fout.close()
        return
def __call__(self, _line):
sys.stdout.write('%s\n' % _line)
self._fout.write('%s\n' % _line)
return
class Fast_mr:
def __init__(self, hklin, xyzin_and_ids):
self._hklin = os.path.abspath(hklin)
self._xyzins = [os.path.abspath(xyzin_and_id[0])
for xyzin_and_id in xyzin_and_ids]
self._ids = [xyzin_and_id[1] for xyzin_and_id in xyzin_and_ids]
self._cpu = 2
self._machines = 10
self._wd = os.getcwd()
self._log = logger()
self._log('Using %d cpus / %d machines' % (self._cpu, self._machines))
self._full_command_line = ' '.join(sys.argv)
# pull information we'll need from the input MTZ file - the unit cell,
# the pointgroup and the number of reflections in the file. select
# first Miller array in file which has native data
# --- SAD DATA ---
m = mtz.object(self._hklin)
mas = m.as_miller_arrays()
self._data = None
for ma in mas:
if str(ma.observation_type()) != 'xray.amplitude':
continue
self._data = ma
break
        if not self._data:
            raise RuntimeError('no amplitude data found in %s' % \
                               self._hklin)
self._pointgroup = self._data.space_group().type().number()
self._unit_cell = self._data.unit_cell().parameters()
self._nrefl = m.n_reflections()
self._spacegroups = generate_chiral_spacegroups(self._pointgroup)
# write out a nice summary of the data set properties and what columns
# were selected for analysis
self._log('Input: %s' % self._hklin)
self._log('Columns: %s' % self._data.info().label_string())
self._log('Unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
self._unit_cell)
self._log('Pointgroup: %s' % m.space_group().type().lookup_symbol())
self._log('Resolution: %.2f - %.2f' % self._data.resolution_range())
self._log('Nrefl: %d' % self._nrefl)
self._log('Spacegroups: %s' % ' '.join(self._spacegroups))
self._log('Input coordinate files:')
self._nres = []
for xyzin, _id in zip(self._xyzins, self._ids):
nres = pdb_file_nres(xyzin)
self._nres.append(nres)
self._log('%40s %8d %.3f' % (os.path.split(xyzin)[1], nres, _id))
total_nres = sum(self._nres)
# FIXME calculate probable number of complexes in here
self._copies = 1
return
def do_mr(self):
t0 = time.time()
cluster = True
njobs = self._machines
ncpu = self._cpu
# set up N phaser jobs
jobs = [ ]
for spacegroup in self._spacegroups:
wd = os.path.join(self._wd, spacegroup)
if not os.path.exists(wd):
os.makedirs(wd)
commands = ['mode mr_auto',
'spacegroup %s' % spacegroup,
'hklin %s' % self._hklin,
'labin F=F SIGF=SIGF',
'root mr%s' % spacegroup]
for j, (xyzin, _id, nres) in enumerate(
zip(self._xyzins, self._ids, self._nres)):
commands.append('ensemble m%d pdb %s identity %f' %
(j, xyzin, 100 * _id))
for j, (xyzin, _id, nres) in enumerate(
zip(self._xyzins, self._ids, self._nres)):
commands.append('composition protein nres %d num %d' %
(nres, self._copies))
for j, (xyzin, _id, nres) in enumerate(
zip(self._xyzins, self._ids, self._nres)):
commands.append('search ensemble m%d num %d' %
(j, self._copies))
jobs.append((wd, commands))
# actually execute the tasks - either locally or on a cluster, allowing
# for potential for fewer available machines than jobs
self._log('Running %d x phaser jobs' % len(jobs))
pool = Pool(min(njobs, len(jobs)))
if cluster:
pool.map(run_phaser_cluster, jobs)
        else:
            # local (non-cluster) execution was never implemented; fail loudly
            # instead of the original placeholder `print(1/0)`
            raise NotImplementedError('local phaser execution is not supported; set cluster = True')
# now look for the results
worked = []
for job in jobs:
wd = job[0]
spacegroup = os.path.split(wd)[-1]
if os.path.exists(os.path.join(wd, 'mr%s.sol' % spacegroup)):
worked.append(os.path.join(wd, 'mr%s.sol' % spacegroup))
for w in worked:
sol = open(w).read()
for record in sol.split('\n'):
if 'SOLU SPAC' in record:
spacegroup = record.replace(
'SOLU SPAC', '').replace(' ', '')
if 'SOLU SET' in record:
tfz = float(record.replace('=', ' ').split()[5])
print('Solution: %s %.2f' % (spacegroup, tfz))
t1 = time.time()
self._log('Time: %.2f' % (t1 - t0))
if __name__ == '__main__':
xyzin_and_ids = []
for arg in sys.argv[2:]:
if ':' in arg:
xyzin = arg.split(':')[0]
_id = float(arg.split(':')[1])
if _id > 1.0:
_id /= 100.0
xyzin_and_ids.append((xyzin, _id))
else:
xyzin_and_ids.append((arg, 1.0))
fast_mr = Fast_mr(sys.argv[1], xyzin_and_ids)
try:
fast_mr.do_mr()
except RuntimeError as e:
fast_mr._log('*** MR: %s ***' % str(e))
traceback.print_exc(file = open('fast_mr.error', 'w'))
sys.exit(1)
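# Example invocation (hypothetical inputs); identities after ':' may be given
# as fractions or percentages, since values > 1.0 are divided by 100:
#   fast_mr.py native.mtz model_a.pdb:0.95 model_b.pdb:45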
|
DiamondLightSource/fast_ep
|
src/fast_mr.py
|
fast_mr.py
|
py
| 6,927 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "iotbx.mtz.object",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "iotbx.mtz",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "generate_possible_spacegroups.generate_chiral_spacegroups",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "parse_pdb.pdb_file_nres",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "fast_mr_phaser.run_phaser_cluster",
"line_number": 169,
"usage_type": "argument"
},
{
"api_name": "os.path.split",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "traceback.print_exc",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 212,
"usage_type": "call"
}
] |
40471740091
|
import os
import cv2
import numpy as np
import shutil
import sys
sys.path.insert(0,os.path.realpath('..'))
sys.path.insert(0,os.path.join(os.path.realpath('..'),'piano_utils'))
from tools.warper import order_points
from config import cfg
from piano_utils.networks import PSPNet
from piano_utils.util import colorize_mask
from piano_utils.keyboard import KeyBoard
from PIL import Image
from tqdm import tqdm
import shapely
from shapely.geometry import Polygon,MultiPoint
import time
from skimage.measure import label, regionprops
from collections import Counter
import json
from IPython import embed
import pickle
exp_cfg = {
'exp_imgs':'/home/data/lj/Piano/experment/keyboard/exp_imgs',
'tmp_dir':'/home/data/lj/Piano/experment/keyboard/tmp_dir',
'figure_dir':'/home/data/lj/Piano/experment/keyboard_figure'
}
class HoughKeyBoard(object):
def __init__(self):
self.theta_thersh = 0.08
def hough_transform(self,img):
res = {}
img_ori = img.copy()
h, w = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(gray, 50, 150, 3)
lines = cv2.HoughLines(edges, 1.0, np.pi / 180, 120)
thetas = [x[0][1] for x in lines if not (x[0][1] < (np.pi / 4.) or
x[0][1] > (3.*np.pi/4.0))]
dic = dict(Counter(thetas))
theta = sorted(dic.items(), key=lambda x: x[1], reverse=True)
        if len(theta) > 0 and theta[0][1] > 1:  #--- the most frequently repeated line angle
most_theta = theta[0][0]
else:
return
x_loc, y_loc, pts = [], [], []
for line in lines:
rho, theta = line[0]
            if abs(most_theta * 180 / np.pi - 90) > 1.5:  #-- the keyboard is tilted
if abs(theta - most_theta) > self.theta_thersh:
continue
            else:  #--- other cases (near-horizontal keyboard)
if not theta == most_theta:
continue
pt1 = (0, max(int(rho / np.sin(theta)), 0))
pt2 = (img_ori.shape[1], max(int((rho - img_ori.shape[1] * np.cos(theta)) / np.sin(theta)),0))
cv2.line(img_ori, pt1, pt2, (0, 255, 0), 1)
pts.append((pt1, pt2))
return img_ori
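# Example use (assumed BGR frame on disk):
#   overlay = HoughKeyBoard().hough_transform(cv2.imread('frame.jpg'))
# The returned copy has the dominant keyboard-edge lines drawn in green.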
def get_img_box_dict():
img_box_dict = dict()
file_name = '/home/data/lj/Piano/Segment/train.txt'
with open(file_name,'r') as fr:
items = [l.strip() for l in fr.readlines()]
mask_lists = []
for item in items:
item = item.split()
if 'tools' in item[0]:
#mask_dir = item[0].split('/')[-2]
continue
else:
mask_dir = os.path.basename(item[0]).split('_img_') [0]
if 'segment' in item[0]:continue
if mask_dir in mask_lists:continue
mask_lists.append(mask_dir)
img_mask = cv2.imread(item[1],cv2.IMREAD_GRAYSCALE)
img_mask[img_mask==2] = 1
img_mask[img_mask==1] = 255
contours,_ = cv2.findContours(img_mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
assert len(contours)==1,'value wrong'
contours = np.squeeze(contours)
rect = order_points(contours).reshape(-1,1,2).astype(int)
img_box_dict[mask_dir] = rect
json_path = os.path.join(exp_cfg['exp_imgs'],'need_labels')
json_files = [os.path.join(json_path,x) for x in os.listdir(json_path) if x.endswith('json')]
json_files.sort()
for json_file in json_files:
with open(json_file,'r') as fr:
items = json.load(fr)
basename = os.path.basename(json_file).split('.')[0]
points = np.array(items['shapes'][0]['points'])
rect = order_points(points).reshape(-1,1,2).astype(int)
img_box_dict[basename] = rect
return img_box_dict
class KeyBoard_Exp(KeyBoard):
def __init__(self):
KeyBoard.__init__(self)
print('KeyBoard load finish')
def detect(self,img):
image = img.convert('RGB')
self.prediction = self.inference(img)
contours,_ = self.find_contours(image,self.prediction)
rect = order_points(contours).reshape(-1,1,2).astype(int)
return rect
def mask2image(self,image):
image = Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
w, h = image.size
colorized_mask = colorize_mask(self.prediction, self.palette)
output_im = Image.new('RGB', (w*2, h))
output_im.paste(image, (0,0))
output_im.paste(colorized_mask, (w,0))
output_im = cv2.cvtColor(np.asarray(output_im),cv2.COLOR_RGB2BGR)
return output_im
def cal_iou(gt_rect, det_rect):
    #--- IoU of two irregular quadrilaterals (they are no longer axis-aligned rectangles)
gt_rect = gt_rect.reshape(4, 2)
poly1 = Polygon(gt_rect).convex_hull
det_rect = det_rect.reshape(4,2)
poly2 = Polygon(det_rect).convex_hull
union_poly = np.concatenate((gt_rect,det_rect))
if not poly1.intersects(poly2):
iou = 0
else:
        try:
            inter_area = poly1.intersection(poly2).area
            union_area = MultiPoint(union_poly).convex_hull.area
            if union_area == 0:
                return 0
            iou = float(inter_area) / union_area
        except shapely.geos.TopologicalError:
            print('shapely.geos.TopologicalError occurred, iou set to 0')
            iou = 0
return iou
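# Worked example (hypothetical quads): two unit squares offset by 0.5 along x
# overlap in a 0.5 x 1 strip, so inter_area = 0.5, the convex-hull union is the
# 1.5 x 1 rectangle (area 1.5), and the IoU is 0.5 / 1.5 = 1/3:
#   cal_iou(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]),
#           np.array([[0.5, 0], [1.5, 0], [1.5, 1], [0.5, 1]]))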
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def main():
seg_pickle_file = os.path.join(exp_cfg['tmp_dir'],'seg.pkl')
hour_pickle_file = os.path.join(exp_cfg['tmp_dir'],'hourgh.pkl')
path = exp_cfg['exp_imgs']
save_seg_dir = os.path.join(exp_cfg['figure_dir'],'segment')
save_hourgh_dir = os.path.join(exp_cfg['figure_dir'],'hourgh')
ensure_dir(save_seg_dir)
ensure_dir(save_hourgh_dir)
img_files = [os.path.join(path,x) for x in os.listdir(path)]
gt_box_dict = get_img_box_dict()
with open(seg_pickle_file,'rb') as f1:
seg_box_dict = pickle.load(f1)
with open(hour_pickle_file,'rb') as f2:
hour_box_dict = pickle.load(f2)
seg_ious = []
for img_mark,det_rect in seg_box_dict.items():
gt_rect = gt_box_dict[img_mark]
iou = cal_iou(gt_rect,det_rect)
if iou>0.5:
seg_ious.append(iou)
else:print(img_mark)
hour_detector = HoughKeyBoard()
keyboard_net = KeyBoard_Exp()
hour_ious = []
for img_mark,det_rect in hour_box_dict.items():
gt_rect = gt_box_dict[img_mark]
iou = cal_iou(gt_rect,det_rect)
if iou>0.5:
hour_ious.append(iou)
else:
img = cv2.imread(os.path.join(path,img_mark+'.jpg'))
img_copy = img.copy()
img_input = Image.fromarray(cv2.cvtColor(img_copy,cv2.COLOR_BGR2RGB))
seg_rect = keyboard_net.detect(img_input)
for rect in det_rect:
rect = rect[0]
cv2.circle(img,(rect[0],rect[1]),5,(0,255,0),3)
for rect in seg_rect:
rect = rect[0]
cv2.circle(img_copy,(rect[0],rect[1]),5,(0,255,0),3)
img_copy = keyboard_net.mask2image(img_copy)
img = hour_detector.hough_transform(img)
cv2.imwrite(os.path.join(save_hourgh_dir,img_mark+'.jpg'),img)
cv2.imwrite(os.path.join(save_seg_dir,img_mark+'.jpg'),img_copy)
if __name__=='__main__':
main()
|
yxlijun/vision-piano-amt
|
figures/plt_keyboard.py
|
plt_keyboard.py
|
py
| 7,430 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "sys.path.insert",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.HoughLines",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_NONE",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "numpy.squeeze",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "tools.warper.order_points",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tools.warper.order_points",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "piano_utils.keyboard.KeyBoard",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "piano_utils.keyboard.KeyBoard.__init__",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "piano_utils.keyboard.KeyBoard",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "tools.warper.order_points",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "piano_utils.util.colorize_mask",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "shapely.geometry.Polygon",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.Polygon",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.MultiPoint",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "shapely.geos",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 199,
"usage_type": "attribute"
}
] |
1383718733
|
from .base import metadata
from sqlalchemy import Table, Column, BigInteger,\
String, Boolean, DateTime
t_users = Table(
"users",
metadata,
Column('u_id', BigInteger), # telegram id
    Column('name', String), # surname with initials
    Column('name_tg', String), # telegram username, if any
    Column('admin', Boolean), # whether the user is an administrator
Column('org_name', String),
Column('org_code', String),
Column('date_update', DateTime),
)
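# Creating the table (the engine URL is a placeholder):
#   from sqlalchemy import create_engine
#   metadata.create_all(create_engine("sqlite:///eventlog.db"))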
|
oleg-medovikov/eventlog
|
base/users.py
|
users.py
|
py
| 618 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.Table",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "base.metadata",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.BigInteger",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 15,
"usage_type": "argument"
}
] |
5759912904
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 20 09:17:19 2019
@author: if715029
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial.distance as sc
import pandas as pd
#%%
data = pd.read_excel('../data/Datos_2015.xlsx',sheet_name='Atemajac')
#%%
data = data.iloc[:,2:7].dropna()
#%%
D1 = sc.squareform(sc.pdist(data.iloc[:,2:],'euclidean'))
#%%
data_norm = (data-data.mean(axis=0))/data.std(axis=0)
#%%
plt.subplot(1,2,1)
plt.scatter(data['CO'],data['PM10'])
plt.axis('square')
plt.subplot(1,2,2)
plt.scatter(data_norm['CO'],data_norm['PM10'])
plt.axis('square')
plt.show()
|
OscarFlores-IFi/CDINP19
|
code/p6.py
|
p6.py
|
py
| 616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_excel",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance.squareform",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.distance",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "scipy.spatial.distance.pdist",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
}
] |
18659651890
|
import models
import typing
import sqlite3
import os
import sys
class Storage:
def __init__(self):
self._conn = sqlite3.connect('v_store.db')
self._cursor = self._conn.cursor()
self._queries: dict[str, str] = self.read_queries()
def __del__(self):
self._cursor.close()
self._conn.close()
def read_queries(self) -> dict[str, str]:
queries = {}
current_key = None
current_query = []
with open("./queries.sql", "r") as f:
for line in f:
line = line.strip()
if line.startswith("--"):
if current_key is not None:
queries[current_key] = "\n".join(current_query)
current_query = []
current_key = line[2:].strip()
else:
current_query.append(line)
if current_key is not None:
queries[current_key] = "\n".join(current_query)
return queries
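    # read_queries() expects queries.sql to label each statement with a
    # "-- name" comment line, e.g. (assumed layout):
    #   -- make_vector_table
    #   CREATE TABLE IF NOT EXISTS points (id INTEGER PRIMARY KEY, txt TEXT, rep TEXT);
    #   -- add_point
    #   INSERT INTO points (txt, rep) VALUES ('{}', '{}');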
def make_table(self) -> int:
try:
self._cursor.execute(self._queries["make_vector_table"])
return 0
except Exception as e:
raise e
def add_point(self, point: models.Point) -> int:
try:
query = self._queries["add_point"].format(str(point), repr(point))
self._cursor.execute(query)
return 0
except Exception as e:
raise e
def get_point(self, id) -> int | models.Vector:
try:
query = self._queries["get_point_by_id"].format(id)
self._cursor.execute(query)
            row = self._cursor.fetchone()
            if row:
                # The original `exec(vec)` cannot rebuild a point from a row
                # tuple; evaluating the stored repr() column (assumed to be
                # the last column) is used here instead.
                return eval(row[-1])
            else:
                raise ValueError("Point with that id does not exist")
except Exception as e:
raise e
def delete_point(self, id) -> int:
try:
query = self._queries["delete_point_by_id"].format(id)
self._cursor.execute(query)
return 0
except Exception as e:
raise e
s = Storage()
print(s.read_queries())
|
Ayon-Bhowmick/Vec4Rec
|
src/storage.py
|
storage.py
|
py
| 2,150 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Point",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "models.Vector",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "models.Vector",
"line_number": 50,
"usage_type": "attribute"
}
] |
22951595310
|
#!/usr/bin/env python2.7
"""
A tool to update the Product Version and Code of a C# VS2010 setup package (*.vdproj). Intended
to be used with an automated build process.
"""
import re
import uuid
import argparse
import os, shutil
import tempfile
##"ProductCode" = "8:{35424778-8534-431B-9492-5CD84B1EDE03}"
productcode_re = re.compile(r"(?:\"ProductCode\" = \"8.){([\d\w-]+)}")
##"ProductVersion" = "8:1.0.89"
productversion_re = re.compile(r"(?:\"ProductVersion\" = \"8.)([\d\w\.]+)\"")
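# Both patterns capture only the value of interest in group(1) -- the GUID
# between the braces and the dotted version string -- so the replace() calls
# below swap just that substring and leave the surrounding vdproj syntax intact.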
def replace_code_and_version(src_fname, version="1.0.0", code="12345678-1234-1234-1234-1233456789012"):
fd, tmp_fname = tempfile.mkstemp()
tmp = open(tmp_fname, 'w')
src = open(src_fname)
for l in src:
if productcode_re.search(l):
m = productcode_re.search(l)
l = l.replace(m.group(1), code)
if productversion_re.search(l):
m = productversion_re.search(l)
l = l.replace(m.group(1), version)
tmp.write(l)
tmp.close()
os.close(fd)
src.close()
os.remove(src_fname)
shutil.move(tmp_fname, src_fname)
def parse_commands(test_args=None):
descrip = "Utility to update ProductCode and ProductVersion of VS2010 setup projects"
parser = argparse.ArgumentParser(description=descrip)
parser.add_argument("-f", "--file", dest="vdproj", action="store", default=None,
help="The vdproj file to be 'adjusted'", required=True)
parser.add_argument("-v", "--version", action="store", dest="version", default="1.0.0",
help="The new version to be set conforming to: major, minor, build e.g '1.0.195'")
parser.add_argument("-c", "--code", dest="code", action="store", default=str(uuid.uuid4()),
help="The new product code GUID. If not provided one is generated. ")
## Don't update the UpgradeCode that needs to stay the same for the product duration
if test_args is None:
args = parser.parse_args()
else:
args = parser.parse_args(test_args)
return args
def main():
args = parse_commands()
replace_code_and_version(args.file, args.version, args.code)
if __name__ == '__main__':
main()
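# Example invocation (hypothetical project file):
#   python pvc_changer.py -f Setup/Installer.vdproj -v 1.0.196
# Omitting -c generates a fresh ProductCode GUID via uuid.uuid4().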
|
wfriedl/pvc_changer
|
pvc_changer.py
|
pvc_changer.py
|
py
| 2,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tempfile.mkstemp",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.close",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 48,
"usage_type": "call"
}
] |
19882708740
|
import os
import click
from guardata.utils import trio_run
from guardata.api.protocol import OrganizationID
from guardata.logging import configure_logging
from guardata.cli_utils import spinner, cli_exception_handler
from guardata.client.types import BackendAddr, BackendOrganizationBootstrapAddr
from guardata.client.backend_connection import apiv1_backend_administration_cmds_factory
async def _create_organization(debug, name, backend_addr, administration_token, expiration_date):
async with spinner("Creating group in backend"):
async with apiv1_backend_administration_cmds_factory(
backend_addr, administration_token
) as cmds:
rep = await cmds.organization_create(name, expiration_date)
if rep["status"] != "ok":
raise RuntimeError(f"Backend refused to create group: {rep}")
bootstrap_token = rep["bootstrap_token"]
organization_addr = BackendOrganizationBootstrapAddr.build(backend_addr, name, bootstrap_token)
organization_addr_display = click.style(organization_addr.to_url(), fg="yellow")
click.echo(f"Bootstrap group url: {organization_addr_display}")
@click.command(short_help="create new group")
@click.argument("name", required=True, type=OrganizationID)
@click.option("--addr", "-B", required=True, type=BackendAddr.from_url)
@click.option("--administration-token", "-T", required=True)
@click.option("--expiration-date", "-E", default=None, type=click.DateTime())
def create_organization(name, addr, administration_token, expiration_date):
debug = "DEBUG" in os.environ
configure_logging(log_level="DEBUG" if debug else "WARNING")
with cli_exception_handler(debug):
trio_run(_create_organization, debug, name, addr, administration_token, expiration_date)
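# Example invocation (hypothetical values; the exact CLI entry point and the
# URL scheme accepted by BackendAddr.from_url are assumptions):
#   create_organization MyGroup -B <backend-url> -T <administration-token>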
|
bitlogik/guardata
|
guardata/client/cli/create_organization.py
|
create_organization.py
|
py
| 1,793 |
python
|
en
|
code
| 9 |
github-code
|
6
|
[
{
"api_name": "guardata.cli_utils.spinner",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "guardata.client.backend_connection.apiv1_backend_administration_cmds_factory",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.BackendOrganizationBootstrapAddr.build",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.BackendOrganizationBootstrapAddr",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "click.style",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "guardata.logging.configure_logging",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "guardata.cli_utils.cli_exception_handler",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "guardata.utils.trio_run",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "click.command",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "guardata.api.protocol.OrganizationID",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "click.option",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "guardata.client.types.BackendAddr.from_url",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "guardata.client.types.BackendAddr",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "click.option",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "click.DateTime",
"line_number": 31,
"usage_type": "call"
}
] |
3345758956
|
from tkinter import *
from tkinter import messagebox
import tkinter as tk
import time, sys
from pygame import mixer
from PIL import Image, ImageTk
def alarm():
alarm_time=user_input.get()
if alarm_time=="":
        messagebox.askretrycancel("Error Message", "Please enter a value")
else:
while True:
time.sleep(1)
if(alarm_time==time.strftime("%H:%M")):
playmusic()
def playmusic():
mixer.init()
mixer.music.load(' clock.mp3')
mixer.music.play()
while mixer.music.get_busy():
time.sleep(30)
mixer.music.stop()
sys.exit()
root=Tk()
root.title(" Alarm clock")
canvas=Canvas(root, width=600,height=380)
image=ImageTk.PhotoImage(Image.open("clock image .png"))
canvas.create_image(0,0,anchor=NW, image=image)
canvas.pack()
header=Frame(root)
box1=Frame(root)
box1.place(x=250,y=180)
box2=Frame(root)
box2.place(x=250,y=180)
#time taken by user
#helv36 = tkFont.Font(family="Helvetica",size=36,weight="bold")
user_input=Entry(box1,font=('ArialNarrow', 20),width=8)
user_input.grid(row=0, column=2)
#set alarm button
# (label, parent frame and placement are assumed; the original left the Button empty)
start_button = Button(box2, text="Set Alarm", font=('ArialNarrow', 20), command=alarm)
start_button.grid(row=0, column=3)
root.mainloop()
|
shuchi111/Alarm_clockGUI.py
|
alarm.py
|
alarm.py
|
py
| 1,256 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "tkinter.messagebox.askretrycancel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pygame.mixer.music.get_busy",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music.stop",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 30,
"usage_type": "name"
}
] |
17345627172
|
import os
import glob
import torch
from torchvision import transforms as T
from torch.utils.data import DataLoader,Dataset
from torch.utils.data.distributed import DistributedSampler
from codes.utils import img_processing
from codes.data import data_utils
import math
import numpy as np
class Load_Data(Dataset):
    '''
    Read the images.
    Extract the Y channel of the low-light image.
    Load the low-light, low-light + noise, and normal-light images.
    '''
def __init__(self, data_root, data_son=None, img_type='jpg', is_resize=False, is_long_resize=False, resize_h=512, resize_w=512):
        if data_son != '':
            # if non-empty, read paired low-light / normal-light images
imgs_ll = glob.glob(os.path.join(data_root, data_son['ll'], '*.*' ))
imgs_ll_noise = glob.glob(os.path.join(data_root, data_son['ll_noise'], '*.*' ))
imgs_org = glob.glob(os.path.join(data_root, data_son['org'], '*.*'))
imgs_org_enhance = glob.glob(os.path.join(data_root, data_son['org_en'], '*.*'))
self.imgs_ll = imgs_ll
self.imgs_org = imgs_org
self.imgs_org_enhance = imgs_org_enhance
else:
imgs_ll_noise = glob.glob(os.path.join(data_root, '*.*'))
self.imgs_ll_noise = imgs_ll_noise
self.data_son = data_son
self.is_resize = is_resize
self.resize_h = resize_h
self.resize_w = resize_w
self.is_long_resize = is_long_resize
        # image preprocessing transforms
self.img_ll_transform = data_utils.train_ll_transforms()
self.img_org_transform = data_utils.train_org_transforms()
def __getitem__(self, index):
        '''
        Read the images and apply the corresponding preprocessing.
        :param index:
        :return:
        '''
        imgs_ll_path = self.imgs_ll[index]  # path of the low-light image at this index
        imgs_ll_noise_path = imgs_ll_path.replace(self.data_son['ll'], self.data_son['ll_noise'])  # low-light + noise
        [_, name] = os.path.split(imgs_ll_path)
        suffix = name[name.find('.') + 1:]  # image extension
name = name[:name.find('.')]
img_ll = img_processing.read_image(imgs_ll_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=True,
is_long_resize=self.is_long_resize)
img_ll_noise, y = img_processing.read_image(imgs_ll_noise_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=True,
is_long_resize=self.is_long_resize, is_cvtColor='YCrCb')
# t0 = abs(img_ll_noise - img_ll)
# t = abs(img_ll_noise - img_ll) / (img_ll + 1e-7)
# r_max = t[:,:,0].max()
# noise_map = np.max(abs(img_ll_noise - img_ll) / img_ll_noise, axis=(0,1,2))
# noise = self.noise_map(img_ll_noise)
        noise_map = img_ll_noise - img_ll  # for non-additive noise this estimate is not correct
noise_map = self.img_org_transform(noise_map)
img_ll = self.img_org_transform(img_ll)
img_ll_noise = self.img_org_transform(img_ll_noise)
        if self.data_son != '':  # read the normal-light images
imgs_org_path = imgs_ll_path.replace(self.data_son['ll'], self.data_son['org'])
            imgs_org_path = imgs_org_path.replace('png', 'jpg')  # the org set stores jpg images
img_org = img_processing.read_image(imgs_org_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=False,
is_long_resize=self.is_long_resize)
img_org = self.img_org_transform(img_org)
imgs_org_en_path = imgs_ll_path.replace(self.data_son['ll'], self.data_son['org_en'])
img_org_en, y_en = img_processing.read_image(imgs_org_en_path, is_resize=self.is_resize, resize_height=self.resize_h,
resize_width=self.resize_w, normalization=False,
is_long_resize=self.is_long_resize, is_cvtColor='YCrCb')
img_org_en = self.img_org_transform(img_org_en)
return img_ll, img_ll_noise, img_org, img_org_en, y, noise_map, name
else:
return img_ll, y, name
    def __len__(self):
        if self.data_son != '':
            return len(self.imgs_ll)  # total number of paired images
        return len(self.imgs_ll_noise)
def get_loader(data_root, data_son, batch_size, is_resize=False,resize_h=384, resize_w=384, img_type='jpg', is_long_resize=False):
dataset = Load_Data(data_root, data_son, is_resize=is_resize, resize_h=resize_h, resize_w=resize_w, img_type=img_type, is_long_resize=is_long_resize)
    data_loader = DataLoader(dataset=dataset,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=1,
                             pin_memory=True)  # page-locked host memory: with pin_memory=True the
    # tensors produced by the loader are allocated in pinned memory, which makes the
    # host-to-GPU copy faster. GPU memory itself is always page-locked; enable
    # pin_memory=True when host RAM is plentiful, since it skips the extra staging copy
    # through pageable RAM and maps the data straight into the GPU transfer, saving time.
return data_loader
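# Example (hypothetical dataset layout):
#   data_son = {'ll': 'low', 'll_noise': 'low_noise', 'org': 'normal', 'org_en': 'normal_en'}
#   loader = get_loader('/data/lowlight', data_son, batch_size=4, is_resize=True)
#   for img_ll, img_ll_noise, img_org, img_org_en, y, noise_map, name in loader:
#       ...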
|
csxuwu/LRCR_Net
|
codes/data/data_loader4.py
|
data_loader4.py
|
py
| 5,103 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "codes.data.data_utils.train_ll_transforms",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "codes.data.data_utils",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "codes.data.data_utils.train_org_transforms",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "codes.data.data_utils",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "codes.utils.img_processing.read_image",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "codes.utils.img_processing",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "codes.utils.img_processing.read_image",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "codes.utils.img_processing",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "codes.utils.img_processing.read_image",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "codes.utils.img_processing",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "codes.utils.img_processing.read_image",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "codes.utils.img_processing",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 101,
"usage_type": "call"
}
] |
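The loader above yields each low-light image together with its noisy variant, the normal-light targets, and a noise map. A minimal usage sketch, assuming a hypothetical directory layout whose subfolder names match the `data_son` keys substituted in `__getitem__`:

# Hypothetical roots and subfolder names; the real layout comes from the repo's config.
data_son = {'ll': 'low', 'll_noise': 'low_noise', 'org': 'normal', 'org_en': 'normal_en'}
loader = get_loader('/data/LOL', data_son, batch_size=4, is_resize=True, resize_h=384, resize_w=384)
for img_ll, img_ll_noise, img_org, img_org_en, y, noise_map, name in loader:
    print(name, img_ll.shape, noise_map.shape)  # batched [B, C, H, W] tensors
    break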
33699952990
|
from django.contrib import admin
from django.urls import path
from web.views import home_page
from django.contrib.auth.views import LoginView, LogoutView
from ckt.views import (
CircuitControlView,
CircuitStatusView,
write_api_view,
read_api_view,
to_circuit,
plot_graph,
chartview,
graph_two,
usercreation,
aboutpage,
privacy,
Register,
Method,
widgets,
)
urlpatterns = [
path("admin/", admin.site.urls),
path("", home_page, name="home"),
path("circuit/", CircuitControlView.as_view(), name="ckt"),
path("circuit_status/", CircuitStatusView.as_view(), name="ckt_status"),
path("control/", to_circuit, name="control"),
path("api_write/", write_api_view, name="write_api"),
path("api_read/", read_api_view, name="read_api"),
path("graph_plot/", plot_graph, name="plot_graph"),
path("chartview/", chartview, name="viewchart"),
path("graph_two/", graph_two, name="chartview_two"),
path("register/", usercreation, name="register"),
path("login/", LoginView.as_view(template_name="Login.html"), name="login"),
path("logout/", LogoutView.as_view(template_name="logout.html"), name="logout"),
path("aboutpage/", aboutpage, name="about_page"),
path("privacy/", privacy, name="privacy_policy"),
path("Register/", Register, name="Register_page"),
path("Method/", Method, name="Method_page"),
path("widgets/", widgets, name="widgets_page"),
]
|
sumansam312/IOT_Platform
|
iot/urls.py
|
urls.py
|
py
| 1,462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "web.views.home_page",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ckt.views.CircuitControlView.as_view",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ckt.views.CircuitControlView",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ckt.views.CircuitStatusView.as_view",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ckt.views.CircuitStatusView",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "ckt.views.to_circuit",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "ckt.views.write_api_view",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ckt.views.read_api_view",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ckt.views.plot_graph",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "ckt.views.chartview",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ckt.views.graph_two",
"line_number": 33,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "ckt.views.usercreation",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView.as_view",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView.as_view",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "ckt.views.aboutpage",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "ckt.views.privacy",
"line_number": 38,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "ckt.views.Register",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ckt.views.Method",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "ckt.views.widgets",
"line_number": 41,
"usage_type": "argument"
}
] |
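Because every route above carries a `name`, other code can resolve URLs symbolically instead of hard-coding paths; a small sketch using Django's `reverse`:

from django.urls import reverse
from django.shortcuts import redirect

def after_login(request):
    # Resolve the route named "ckt" declared in urlpatterns above -> "/circuit/"
    return redirect(reverse("ckt"))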
38907427149
|
import tkinter as tk
try:
import pygame
except ImportError:
audio = None
else:
audio = True
import sys
import random
import time
### Stopped Let's code: Tetris episode 19 by TigerhawkT3 at 00:58:42
### Use score_lines or high_score_lines to increase level and speed etc.
class Shape:
def __init__(self, shape, key, piece, row, column, coords):
self.shape = shape
self.key = key
self.piece = piece
self._row = row
self.kicked = False
self._rotation_index = 0
self.column = column
self.coords = coords
self.hover_time = self.spin_time = time.perf_counter()
@property
def row(self):
return self._row
@row.setter
def row(self, x):
if x != self._row and not self.kicked:
self._row = x
self.hover_time = time.perf_counter()
@property
def rotation_index(self):
return self._rotation_index
@rotation_index.setter
def rotation_index(self, x):
self._rotation_index = x
self.spin_time = time.perf_counter()
@property
def hover(self):
return time.perf_counter() - self.hover_time < 0.5
@property
def spin(self):
return time.perf_counter() - self.spin_time < 0.5
class Tetris:
def __init__(self, parent, audio):
self.debug = 'debug' in sys.argv[1:]
self.random = 'random' in sys.argv[1:]
self.hover = 'nohover' not in sys.argv[1:]
self.spin = 'spin' in sys.argv[1:]
self.kick = 'kick' in sys.argv[1:]
parent.title('Pythris')
self.parent = parent
self.audio = audio
if self.audio:
pygame.mixer.init(buffer=512)
try:
self.sounds = {name: pygame.mixer.Sound(name) for name in ('music.ogg',
'settle.ogg',
'clear.ogg',
'lose.ogg')}
except pygame.error as err:
self.audio = None
print(err)
else:
self.audio = {'m': True, 'e': True}
for char in 'mMeE':
self.parent.bind(char, self.toggle_audio)
self.sounds['music.ogg'].play(loops=-1)
self.board_width = 10
self.board_height = 24
self.high_score = 0
self.high_score_lines = 0
self.width = 200
self.height = 480
self.square_width = self.width//10
self.max_speed_score = 5000
self.speed_factor = 250
self.shapes = {'S':[['*', ''],
['*', '*'],
['', '*']],
'Z':[['', '*'],
['*', '*'],
['*', '']],
'J':[['*', '*'],
['*', ''],
['*', '']],
'L':[['*', ''],
['*', ''],
['*', '*']],
'O':[['*', '*'],
['*', '*']],
'I':[['*'],
['*'],
['*'],
['*']],
'T':[['*', '*', '*'],
['', '*', '']]
}
self.colours = {'S': '#6495ED',
'Z': '#F08080',
'J': '#B0C4DE',
'L': '#FFDAB9',
'O': '#DB7093',
'I': '#BA55D3',
'T': '#40E0D0'}
for key in ('<Down>', '<Left>', '<Right>'):
self.parent.bind(key, self.shift)
self.parent.bind('<Up>', self.rotate)
for key in ('a', 'A', 'd', 'D', 's', 'S'):
self.parent.bind(key, self.snap)
self.parent.bind('<Escape>', self.pause)
for key in ('<Control-n>', '<Control-N>'):
self.parent.bind(key, self.draw_board)
for key in ('g', 'G'):
self.parent.bind(key, self.toggle_guides)
self.canvas = None
self.preview_canvas = None
self.ticking = None
self.spawning = None
self.guide_fill = ''
self.score_var = tk.StringVar()
self.score_label = tk.Label(ROOT,
textvariable=self.score_var,
width=25,
height=5,
font=('Helvetica', 12))
self.score_label.grid(row=2, column=1, sticky="S")
self.high_score_var = tk.StringVar()
self.high_score_var.set('High Score:\n0 (0)')
self.high_score_label = tk.Label(ROOT,
textvariable=self.high_score_var,
width=25,
height=5,
font=('Helvetica', 12))
self.high_score_label.grid(row=3, column=1, sticky="N")
self.preview_label = tk.Label(ROOT,
text='Next Piece',
width=25,
height=5,
font=('Helvetica', 12))
self.preview_label.grid(row=0, column=1, sticky="S")
self.draw_board()
def tick(self):
if self.piece_is_active and not (self.spin and self.active_piece.spin):
self.shift()
self.ticking = self.parent.after(self.tickrate, self.tick)
def shift(self, event=None):
if not self.piece_is_active:
return
r = self.active_piece.row
c = self.active_piece.column
l = len(self.active_piece.shape)
w = len(self.active_piece.shape[0])
direction = (event and event.keysym) or 'Down'
# event.keysym names the pressed arrow key; gravity ticks pass no event, so default to 'Down'
if direction == 'Down':
rt = r+1 # row temporary
ct = c # column temporary
elif direction == 'Left':
rt = r
ct = c-1
elif direction == 'Right':
rt = r
ct = c+1
success = self.check_and_move(self.active_piece.shape, rt, ct, l, w)
if direction == 'Down' and not success and not (self.hover and self.active_piece.hover):
self.settle()
def draw_board(self, event=None):
if self.ticking:
self.parent.after_cancel(self.ticking)
if self.spawning:
self.parent.after_cancel(self.spawning)
self.score_var.set('Score:\n0')
self.board = [['' for column in range(self.board_width)]
for row in range(self.board_height)]
self.field = [[None for column in range(self.board_width)] for row in range(self.board_height)]
if self.canvas:
self.canvas.destroy()
self.canvas = tk.Canvas(ROOT, width=self.width, height=self.height)
self.canvas.grid(row=0, column=0, rowspan=4)
self.border = self.canvas.create_rectangle(2,
2,
self.width - 2,
self.height - 2,
width=2)
self.h_separator = self.canvas.create_line(0,
self.height//6,
self.width,
self.height//6,
width=2)
self.v_separator = self.canvas.create_line(self.width,
0,
self.width,
self.height,
width=2)
if self.preview_canvas:
self.preview_canvas.destroy()
self.preview_canvas = tk.Canvas(ROOT,
width=5*self.square_width,
height=5*self.square_width)
self.preview_canvas.grid(row=1, column=1)
self.tickrate = 1000
self.score = 0
self.score_lines = 0
self.piece_is_active = False
self.paused = False
self.bag = []
self.preview()
self.guides = [self.canvas.create_line(0, 0, 0, self.height),
self.canvas.create_line(0, 0, self.width, self.height)]
self.spawning = self.parent.after(self.tickrate, self.spawn)
self.ticking = self.parent.after(self.tickrate*2, self.tick)
def toggle_guides(self, event=None):
self.guide_fill = '' if self.guide_fill else 'black'
self.canvas.itemconfig(self.guides[0], fill=self.guide_fill)
self.canvas.itemconfig(self.guides[1], fill=self.guide_fill)
def toggle_audio(self, event=None):
if not event:
return
key = event.keysym.lower()
self.audio[key] = not self.audio[key]
if key == 'm':
if not self.audio['m']:
self.sounds['music.ogg'].stop()
else:
self.sounds['music.ogg'].play(loops=-1)
def pause(self, event=None):
if self.piece_is_active and not self.paused:
self.paused = True
self.piece_is_active = False
self.parent.after_cancel(self.ticking)
elif self.paused:
self.paused = False
self.piece_is_active = True
self.ticking = self.parent.after(self.tickrate, self.tick)
def print_board(self):
for row in self.board:
print(*(cell or ' ' for cell in row))
def check(self, shape, r, c, l, w):
for row, squares in zip(range(r, r+l), shape):
for column, square in zip(range(c, c+w), squares):
if ((row not in range(self.board_height))
or (column not in range(self.board_width))
or (square and self.board[row][column] == 'x')):
return
return True
def move(self, shape, r, c, l, w):
square_idxs = iter(range(4))
for row in self.board:
row[:] = ['' if cell == '*' else cell for cell in row]
for row, squares in zip(range(r, r+l), shape):
for column, square in zip(range(c, c+w), squares):
if square:
self.board[row][column] = square
square_idx = next(square_idxs)
coord = (column*self.square_width,
row*self.square_width,
(column+1)*self.square_width,
(row+1)*self.square_width)
self.active_piece.coords[square_idx] = coord
self.canvas.coords(self.active_piece.piece[square_idx], coord)
self.active_piece.row = r
self.active_piece.column = c
self.active_piece.shape = shape
self.move_guides(c, (c+w))
if self.debug:
self.print_board()
return True
def check_and_move(self, shape, r, c, l, w):
return self.check(shape, r, c, l, w) and self.move(shape, r, c, l, w)
def rotate(self, event=None):
if not self.active_piece:
return
if len(self.active_piece.shape) == len(self.active_piece.shape[0]):
self.active_piece.rotation_index = self.active_piece.rotation_index # square piece: reassignment refreshes spin_time via the property setter
return
r = self.active_piece.row
c = self.active_piece.column
l = len(self.active_piece.shape)
w = len(self.active_piece.shape[0])
x = c + w//2
y = r + l//2
direction = event.keysym
'''if direction in ('a', 'A'): # left
shape = rotate_array(self.active_piece.shape, -90)
rotation_index = (self.active_piece.rotation_index - 1) % 4
ra, rb = self.active_piece.rotation[rotation_index]
rotation_offsets = -ra, -rb
else: # right'''
shape = rotate_array(self.active_piece.shape, 90)
rotation_index = self.active_piece.rotation_index
rotation_offsets = self.active_piece.rotation[rotation_index]
rotation_index = (rotation_index + 1) % 4
l = len(shape)
w = len(shape[0])
rt = y - l//2
ct = x - w//2
x_correction, y_correction = rotation_offsets
rt += y_correction
ct += x_correction
if self.check_and_move(shape, rt, ct, l, w):
self.active_piece.rotation_index = rotation_index
if self.active_piece.kicked:
self.snap()
return
if self.kick:
for a, b in zip((0, 0, -1, 0, 0, -2, -1, -1, -1, -1, -2, -2, -2, -2),
(-1, 1, 0, -2, 2, 0, -1, 1, -2, 2, -1, 1, -2, 2)):
if self.check_and_move(shape, rt+a, ct+b, l, w):
self.active_piece.rotation_index = rotation_index
if not self.active_piece.kicked:
self.active_piece.kicked = a
if self.active_piece.kicked and not a:
self.snap()
return
def settle(self):
self.piece_is_active = False
for row in self.board:
row[:] = ['x' if cell == '*' else cell for cell in row]
if self.debug:
self.print_board()
for (x1, y1, x2, y2), id in zip(self.active_piece.coords, self.active_piece.piece):
self.field[y1//self.square_width][x1//self.square_width] = id
indices = [idx for idx, row in enumerate(self.board) if all(row)]
if indices:
self.score += (40, 100, 300, 1200)[len(indices) - 1]
self.score_lines += len(indices)
self.clear(indices)
if all(not cell for row in self.board for cell in row):
self.score += 2000
self.high_score = max(self.score, self.high_score)
self.high_score_lines = max(self.score_lines, self.high_score_lines)
self.score_var.set(f"Score:\n{self.score} ({self.score_lines})")
self.high_score_var.set(f"High Score:\n{self.high_score} ({self.high_score_lines})")
if self.score < self.max_speed_score:
self.tickrate = 1000 // (self.score//self.speed_factor + 1)
if any(any(row) for row in self.board[:4]):
self.lose()
return
if self.audio and self.audio['e'] and not indices: # self.audio is None when pygame audio is unavailable
self.sounds['settle.ogg'].play()
self.spawning = self.parent.after(500 if indices and self.tickrate < 500 else self.tickrate, self.spawn)
def preview(self):
self.preview_canvas.delete(tk.ALL)
if not self.bag:
if self.random:
self.bag.append(random.choice('IJLOSTZ'))
else:
self.bag = random.sample('IJLOSTZ', 7)
key = self.bag.pop()
shape = rotate_array(self.shapes[key], random.choice((0, 90, 180, 270)))
self.preview_piece = Shape(shape, key, [], 0, 0, [])
width = len(shape[0])
half = self.square_width//2
for y, row in enumerate(shape):
for x, cell in enumerate(row):
if cell:
self.preview_piece.coords.append((self.square_width*x + half,
self.square_width*y + half,
self.square_width*(x+1) + half,
self.square_width*(y+1) + half))
self.preview_piece.piece.append(self.preview_canvas.create_rectangle(self.preview_piece.coords[-1],
fill=self.colours[key],
width=2))
self.preview_piece.rotation_index = 0
self.preview_piece.i_nudge = (len(shape) < len(shape[0])) and 4 in (len(shape), len(shape[0]))
self.preview_piece.row = self.preview_piece.i_nudge
if 3 in (len(shape), len(shape[0])):
self.preview_piece.rotation = [(0, 0),
(1, 0),
(-1, 1),
(0, -1)]
else:
self.preview_piece.rotation = [(1, -1),
(0, 1),
(0, 0),
(-1, 0)]
if len(shape) < len(shape[0]):
self.preview_piece.rotation_index += 1
def move_guides(self, left, right):
self.canvas.coords(self.guides[0], left*self.square_width, 0, left*self.square_width, self.height)
self.canvas.coords(self.guides[1], right*self.square_width, 0, right*self.square_width, self.height)
def spawn(self):
self.piece_is_active = True
self.active_piece = self.preview_piece
self.preview()
width = len(self.active_piece.shape[0])
start = (10-width)//2
self.active_piece.column = start
self.active_piece.start = start
self.active_piece.coords = []
self.active_piece.piece = []
for y, row in enumerate(self.active_piece.shape):
self.board[y+self.active_piece.i_nudge][start:start+width] = self.active_piece.shape[y]
for x, cell in enumerate(row, start=start):
if cell:
self.active_piece.coords.append((self.square_width*x,
self.square_width*(y+self.active_piece.i_nudge),
self.square_width*(x+1),
self.square_width*(y+self.active_piece.i_nudge+1)))
self.active_piece.piece.append(self.canvas.create_rectangle(self.active_piece.coords[-1],
fill=self.colours[self.active_piece.key],
width=2))
self.move_guides(start, (start+width))
if self.debug:
self.print_board()
def lose(self):
self.piece_is_active = False
if self.audio and self.audio['e']:
self.sounds['lose.ogg'].play()
self.parent.after_cancel(self.ticking)
self.parent.after_cancel(self.spawning)
self.clear_iter(range(len(self.board)))
def snap(self, event=None):
down = {'s', 'S'}
left = {'a', 'A'}
right = {'d', 'D'}
if not self.piece_is_active:
return
r = self.active_piece.row
c = self.active_piece.column
l = len(self.active_piece.shape)
w = len(self.active_piece.shape[0])
direction = event.keysym if event is not None else 's'
while 1:
if self.check(self.active_piece.shape,
r+(direction in down),
c + (direction in right) - (direction in left),
l,
w):
r += direction in down
c += (direction in right) - (direction in left)
else:
break
self.move(self.active_piece.shape, r, c, l, w)
if direction in down:
self.settle()
def clear(self, indices):
if self.audio and self.audio['e']:
self.sounds['clear.ogg'].play()
for idx in indices:
self.board.pop(idx)
self.board.insert(0, ['' for column in range(self.board_width)])
self.clear_iter(indices)
def clear_iter(self, indices, current_column=0):
for row in indices:
if row%2:
cc = current_column
else:
cc = self.board_width - current_column - 1
id = self.field[row][cc]
self.field[row][cc] = None
self.canvas.delete(id)
if current_column < self.board_width-1:
self.parent.after(50, self.clear_iter, indices, current_column+1)
else:
for idx, row in enumerate(self.field):
offset = sum(r > idx for r in indices)*self.square_width
for square in row:
if square:
self.canvas.move(square, 0, offset)
for row in indices:
self.field.pop(row)
self.field.insert(0, [None for x in range(self.board_width)])
def rotate_array(array, angle, wide=False):
'''
Rotates a rectangular or diamond 2D array in increments of 45 degrees.
Parameters:
array (list): a list containing sliceable sequences, such as list, tuple, or str
angle (int): a positive angle for rotation, in 45-degree increments.
wide (bool): whether a passed diamond array should rotate into a wide array
instead of a tall one (tall is the default). No effect on square matrices.
'''
angle = angle%360
if angle < 1:
return [list(row) for row in array]
lengths = list(map(len, array))
rect = len(set(lengths)) == 1
width = max(lengths)
height = sum(lengths)/width
if wide:
width, height = height, width
if not rect:
array = [list(row) for row in array]
array = [[array[row+col].pop() for row in range(width)] for col in range(height)]
angle += 45
nineties, more = divmod(angle, 90)
if nineties == 3:
array = list(zip(*array))[::-1]
else:
for i in range(nineties):
array = list(zip(*array[::-1]))
if more:
ab = abs(len(array)-len(array[0]))
m = min(len(array), len(array[0]))
tall = len(array) > len(array[0])
array = [[array[r][c] for r,c in zip(range(row-1, -1, -1), range(row))
] for row in range(1, m+1)
] + [[array[r][c] for r,c in zip(range(m-1+row*tall, row*tall-1, -1),
range(row*(not tall), m+row*(not tall)+1))
] for row in range(1, ab+(not tall))
] + [[array[r][c] for r,c in zip(range(len(array)-1, ab*tall+row-1, -1),
range(ab*(not tall)+row, len(array[0])+(not tall)))
] for row in range((not tall), m)
]
return array
ROOT = tk.Tk()
TETRIS = Tetris(ROOT, audio)
ROOT.mainloop()
|
Jack-Evitts/Pythtris
|
Tetris.py
|
Tetris.py
|
py
| 22,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.perf_counter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.init",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.Sound",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pygame.error",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tkinter.StringVar",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tkinter.StringVar",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "tkinter.ALL",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 397,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 403,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 577,
"usage_type": "call"
}
] |
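For right-angle turns on rectangular shapes, `rotate_array` reduces to the reverse-then-transpose trick (`zip(*array[::-1])`). A quick check with the `T` tetromino from `self.shapes`; note the function returns rows as tuples:

t_shape = [['*', '*', '*'],
           ['', '*', '']]
assert rotate_array(t_shape, 90) == [('', '*'), ('*', '*'), ('', '*')]  # one clockwise turn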
27592948212
|
import serial
inp=input("Enter the port : ")
ser=serial.Serial(inp,baudrate=230400,timeout=None)
data_old=0 # always the first-fixed bit
skipped=0
cntr=0
fl=1
su=0
cx=0
while True:
if (skipped!=0):
data_old=ser.readline().decode('ascii')[0]
data_old=int(data_old)
skipped-=1
continue
data_new=ser.readline().decode('ascii')[0]
data_new=int(data_new)
if (data_old!=data_new):
skipped=3
# print(data_old)
if(fl==1): # still hunting for the sync preamble (27 consecutive zeros)
if(data_old==0):
cntr+=1
if(data_old==1):
cntr=0
if(cntr==27):
# print("Connection Established")
cntr=0
fl=0
continue
if(fl==0): # synced: accumulate 8 bits per character, MSB first
cx+=1
su=int(data_old)+su*2
if(cx==8):
print(chr(su), end='')
cx=0
su=0
|
eclubiitk/Li-Fi-E-Club
|
Old Codes/non-queue implementation/Receiver.py
|
Receiver.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "serial.Serial",
"line_number": 3,
"usage_type": "call"
}
] |
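The receiver shifts each settled bit into `su` MSB-first (`su = bit + su*2`), so after eight bits `su` holds one ASCII code. A worked example, independent of the serial port:

bits = [0, 1, 0, 0, 0, 0, 0, 1]  # 'A' transmitted MSB first
su = 0
for b in bits:
    su = b + su * 2  # left-shift the accumulator and append the new bit
print(su, chr(su))  # 65 A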
22938524534
|
import cv2
import numpy as np
from model import Model
import math as m
import time
import logging as log
class headPoseEstimation():
def __init__(self, MODEL_PATH, DEVICE):
self.model_loaded = Model(MODEL_PATH, DEVICE)
self.model_loaded.get_unsupported_layer()
self.model_name = self.model_loaded.get_model_name()
self.initial_w = None
self.initial_h = None
self.frame = None
self.image_input_shape = self.model_loaded.get_input_shape()
def input_blobs(self):
return self.model_loaded.get_input_blob()
def output_blobs(self):
return self.model_loaded.get_output_blob()
def set_params(self, frame, initial_w, initial_h):
self.frame = frame
self.initial_w = initial_w
self.initial_h = initial_h
def get_inference_outputs(self):
t0 = time.perf_counter()
t_count = 0
inputs_model = self.input_blobs()
prepro_img_face = self.preprocess_frame(self.frame)
inputs_to_feed = {inputs_model[0]:prepro_img_face}
t_start = time.perf_counter()
head_pose_angles = self.inference(inputs_to_feed)
t_end = time.perf_counter()
t_count += 1
log.info("model {} is processed with {:0.2f} requests/sec ({:0.2} sec per request)".format(self.model_name, 1 / (t_end - t_start), t_end - t_start))
return head_pose_angles
def preprocess_frame(self, frame):
resize_frame = cv2.resize(frame, (self.image_input_shape[0][3], self.image_input_shape[0][2]), interpolation=cv2.INTER_AREA)
resize_frame = resize_frame.transpose((2,0,1))
resize_frame = resize_frame.reshape(1, *resize_frame.shape)
return resize_frame
def inference(self, input_data):
return self.model_loaded.get_infer_output(input_data)
|
SamyTahar/Computer-Pointer-Controller
|
src/headposeestimation.py
|
headposeestimation.py
|
py
| 1,885 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "model.Model",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 59,
"usage_type": "attribute"
}
] |
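`preprocess_frame` converts an OpenCV HWC frame into the NCHW blob the inference model expects. The same transform in isolation, with a dummy frame and an assumed 60x60 network input:

import numpy as np
import cv2

frame = np.zeros((480, 640, 3), dtype=np.uint8)            # HWC frame as read by cv2
resized = cv2.resize(frame, (60, 60), interpolation=cv2.INTER_AREA)
blob = resized.transpose((2, 0, 1)).reshape(1, 3, 60, 60)  # HWC -> CHW -> NCHW
print(blob.shape)  # (1, 3, 60, 60)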
26971894453
|
import logging
import mtcorr
import statistics as stats
import math
import h5py
import numpy
import sys
log = logging.getLogger(__name__)
def load_from_hdf5(filename):
f = h5py.File(filename,'r')
quantiles_dict = {}
stats = {}
if 'quantiles' in f:
quantiles_dict['exp_quantiles'] = f['quantiles']['quantiles'][:,0].tolist()
quantiles_dict['quantiles'] = f['quantiles']['quantiles'][:,1].tolist()
quantiles_dict['exp_log_quantiles'] = f['quantiles']['log_quantiles'][:,0].tolist()
quantiles_dict['log_quantiles'] = f['quantiles']['log_quantiles'][:,1].tolist()
stats['quantiles_dict'] = quantiles_dict
pvals_group = f['pvalues']
method = pvals_group.attrs.get('analysis_method','')
transformation = pvals_group.get('transformation','')
if 'ks_stat' in pvals_group.attrs:
stats['ks_stats'] = {'D':pvals_group.attrs['ks_stat']}
if 'ks_pval' in pvals_group.attrs:
stats['ks_stats']['p_val'] = pvals_group.attrs['ks_pval']
if 'med_pval' in pvals_group.attrs:
stats['med_pval'] = pvals_group.attrs['med_pval']
if 'bh_thres' in pvals_group.attrs:
stats['bh_thres_d'] = {'thes_pval': math.pow(10,-pvals_group.attrs['bh_thres'])}
chromosomes = []
positions = []
scores = []
mafs = []
macs = []
additional_columns = {}
chrs = list(map(lambda x: x[3:], f['pvalues'].keys())) # materialize: iterated more than once
for ix,chr in enumerate(chrs):
chr_group = pvals_group['chr%s'% chr]
chromosomes.extend([chr]*len(chr_group['positions']))
positions.extend(chr_group['positions'][:].tolist())
scores.extend(chr_group['scores'][:].tolist())
mafs.extend(chr_group['mafs'][:].tolist())
macs.extend(chr_group['macs'][:].tolist())
for i,key in enumerate(chr_group.keys()):
if key not in ('positions','scores','mafs','macs'):
values = chr_group[key][:].tolist()
if key not in additional_columns:
additional_columns[key] = values
else:
additional_columns[key].extend(values)
f.close()
scores = list(map(lambda x: math.pow(10, -1*x), scores))
maf_dict = {'mafs':mafs,'macs':macs}
return GWASResult(chrs,chromosomes,positions,scores,maf_dict,method,transformation,stats=stats,additional_columns=additional_columns)
def load_from_csv(filename):
chromosomes = []
positions = []
pvals = []
mafs = []
macs = []
additional_columns = {}
chrs = []
chr = None
is_pval = False
with open(filename,'r') as f:
header = f.readline().rstrip()
add_header = header.split(",")[5:]
for key in add_header:
key = key.replace('"','')
additional_columns[key] = []
for row in f:
fields = row.rstrip().split(",")
if chr != fields[0]:
chr = fields[0]
chrs.append(chr)
chromosomes.append(chr)
positions.append(int(float(fields[1])))
pvals.append(float(fields[2]))
mafs.append(float(fields[3]))
macs.append(int(float(fields[4])))
if len(add_header) > 0:
for i,key in enumerate(add_header):
key = key.replace('"','')
addit_value = None
if fields[(5+i)] != '':
addit_value = float(fields[(5+i)])
additional_columns[key].append(addit_value)
is_pval = max(pvals) <= 1.0
if is_pval is False:
pvals = list(map(lambda x: math.pow(10, -1*x), pvals))
return GWASResult(chrs,chromosomes,positions,pvals,{'mafs':mafs,'macs':macs},additional_columns = additional_columns)
class GWASResult(object):
def __init__(self,chrs,chromosomes,positions,pvals,maf_dict,method = 'N/A',transformation = None,stats = None,additional_columns = None,step_stats = None):
self.ix_with_bad_pvalues = ix_with_bad_pvalues = numpy.where(pvals == 0.0)[0]
if len(ix_with_bad_pvalues) > 0:
pvals[ix_with_bad_pvalues] = sys.float_info.min
self.pvals = pvals
self.method = method
self.transformation = transformation
self.chrs = chrs
self.chromosomes = chromosomes
self.positions = positions
self.stats = stats
self.maf_dict = maf_dict
self.additional_columns = additional_columns
self.step_stats = step_stats
self.bonferroni_threshold = -math.log10(0.05 / len(pvals))
self.min_pval = min(pvals)
if not self.stats:
self._calculate_stats_()
def _calculate_stats_(self):
log.info('Calculating Benjamini-Hochberg threshold',extra={'progress':90})
#Calculate Benjamini-Hochberg threshold
self.stats = {}
self.stats['bh_thres_d'] = mtcorr.get_bhy_thres(self.pvals, fdr_thres=0.05)
#Calculate Median p-value
self.stats['med_pval'] = stats.calc_median(self.pvals)
#Calculate the Kolmogorov-Smirnov statistic
self.stats['ks_stats'] = stats.calc_ks_stats(self.pvals)
self.stats['quantiles_dict'] = stats.calculate_qqplot_data(self.pvals)
def get_top_snps(self,top_ratio=2500):
data = numpy.core.records.fromrecords(list(zip(self.chromosomes, self.positions, self.pvals, self.maf_dict['mafs'], self.maf_dict['macs'], *self.additional_columns.values())), names='chr,positions,scores,mafs,macs')
data_to_return=[]
for ix,chr in enumerate(self.chrs):
chr_data = data[numpy.where(data['chr'] == chr)]
chr_data = chr_data[chr_data['scores'].argsort()][:top_ratio]
data_to_return.append(chr_data)
return numpy.concatenate(data_to_return)
def save_as_csv(self,csv_file):
data = numpy.array(list(zip(self.chromosomes, self.positions, self.pvals, self.maf_dict['mafs'], self.maf_dict['macs'], *self.additional_columns.values())))
data = data[numpy.lexsort((data[:,1], data[:,0]))]
additional_column_headers = self.additional_columns.keys()
header = ['chromosomes','positions','pvals','mafs','macs']
header.extend(additional_column_headers)
with open(csv_file,'w') as f:
f.write(','.join(header)+"\n")
for row in data:
rows_to_write = row.tolist()
rows_to_write[0] = int(rows_to_write[0])
rows_to_write[1] = int(rows_to_write[1])
rows_to_write[4] = int(float(rows_to_write[4]))
f.write(','.join(map(str,rows_to_write))+"\n")
def save_as_hdf5(self,hdf5_file):
positions = self.positions
chromosomes = self.chromosomes
maf_dict = self.maf_dict
scores = list(map(lambda x: -math.log10(x), self.pvals)) # materialize: len() and max() are taken below
quantiles_dict = self.stats['quantiles_dict']
f = h5py.File(hdf5_file,'w')
# store quantiles
quant_group = f.create_group('quantiles')
quantiles_array = list(zip(quantiles_dict['exp_quantiles'], quantiles_dict['quantiles']))
log_quantiles_array = list(zip(quantiles_dict['exp_log_quantiles'], quantiles_dict['log_quantiles']))
quant_group.create_dataset('quantiles',(len(quantiles_dict['quantiles']), 2),'f8',data=quantiles_array)
quant_group.create_dataset('log_quantiles',(len(quantiles_dict['log_quantiles']), 2),'f8',data=log_quantiles_array)
#store pvalues
pvals_group = f.create_group('pvalues')
if len(self.ix_with_bad_pvalues) > 0:
pvals_group.attrs['ix_with_bad_pvalues'] = self.ix_with_bad_pvalues
pvals_group.attrs['numberOfSNPs'] = len(scores)
pvals_group.attrs['max_score'] = max(scores)
if self.method is not None:
pvals_group.attrs['analysis_method'] = self.method
transformation = "raw"
if self.transformation is not None:
transformation = self.transformation
pvals_group.attrs['transformation'] = transformation
pvals_group.attrs['bonferroni_threshold'] = self.bonferroni_threshold
pvals_group.attrs['ks_stat'] = self.stats['ks_stats']['D']
pvals_group.attrs['ks_pval'] = self.stats['ks_stats']['p_val']
pvals_group.attrs['med_pval'] = self.stats['med_pval']
pvals_group.attrs['bh_thres'] =-math.log10(self.stats['bh_thres_d']['thes_pval'])
data = numpy.core.records.fromrecords(list(zip(chromosomes, positions, scores, maf_dict['mafs'], maf_dict['macs'], *self.additional_columns.values())), names='chr,positions,scores,mafs,macs')
for ix,chr in enumerate(self.chrs):
chr_group = pvals_group.create_group("chr%s" % chr)
chr_data = data[numpy.where(data['chr'] == chr)]
chr_data = chr_data[chr_data['scores'].argsort()[::-1]]
positions = chr_data['positions']
chr_group.create_dataset('positions',(len(positions),),'i4',data=positions)
scores = chr_data['scores']
chr_group.create_dataset('scores',(len(scores),),'f8',data=scores)
mafs = chr_data['mafs']
chr_group.create_dataset('mafs',(len(mafs),),'f8',data=mafs)
macs = chr_data['macs']
chr_group.create_dataset('macs',(len(macs),),'i4',data=macs)
if len(chr_data.dtype) > 5:
for i,key in enumerate(self.additional_columns.keys()):
values = chr_data['f%s'% (5+i)]
chr_group.create_dataset(key,values.shape,values.dtype,data=values)
f.close()
|
timeu/PyGWAS
|
pygwas/core/result.py
|
result.py
|
py
| 9,529 |
python
|
en
|
code
| 20 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "sys.float_info",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "math.log10",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "mtcorr.get_bhy_thres",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "statistics.calc_median",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "statistics.calc_ks_stats",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "statistics.calculate_qqplot_data",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.core.records.fromrecords",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.core",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.lexsort",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.core.records.fromrecords",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.core",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 202,
"usage_type": "call"
}
] |
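The `bonferroni_threshold` stored by `GWASResult` is just `-log10(0.05 / n_tests)` on the -log10 score scale; a quick numeric check for one million SNPs:

import math

n_tests = 1_000_000
print(round(-math.log10(0.05 / n_tests), 2))  # 7.3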
40684104940
|
#!/usr/bin/python3
"""
function that queries the Reddit API
and returns the number of subscribers
"""
import requests
def number_of_subscribers(subreddit):
"""initializate"""
if (type(subreddit) is not str):
return(0)
url_api = ("https://www.reddit.com/r/{}/about.json".format(subreddit))
headers = {'user-agent': 'safari:holberton/0.1.0'}
response = requests.get(url_api, headers=headers)
if response.status_code != 200:
return(0)
return(response.json().get("data").get("subscribers"))
|
manosakpujiha/alx-system_engineering-devops
|
0x16-api_advanced/0-subs.py
|
0-subs.py
|
py
| 539 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
}
] |
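A minimal usage sketch of the helper above; non-string input short-circuits to 0, and any non-200 response also returns 0:

print(number_of_subscribers("python"))  # live subscriber count, varies
print(number_of_subscribers(42))        # 0: non-string input is rejected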
7997489923
|
"""
Before executing this script make sure that all packages are installed properly
and also select 3 ips from resource pool wiki which are not in use.(check using ping command)
purpose:
-------
This script is for the first-time setup of a DCS VM, which includes
accepting the EULA, changing the password, configuring the IP, and changing the schematic of the DCS VM.
"""
from re import search, IGNORECASE
from SSHLibrary import SSHLibrary
import json
import platform, os, sys
import time
import netifaces
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from auto_loader import load_from_file
import logging
logging.basicConfig(level=logging.INFO)
class dcs(object):
def __init__(self, ipv6="", vmInterface="", user="", userpwd=""):
"""
Constructor for the DCS appliance REST wrapper.
We compute the correct API-Version for REST calls.
Parameters
----------
ipv6 : str
Ipv6 of the dcs vm to connect.
vmInterface: str
Interface of the ubuntu VM.
IPv6 address starts with fe80:: i.e. it's a link-local address, reachable only in the network segment it's directly connected to.
Using the NIC that connects to that segment specifically.
user: str
Username of the DCS VM name
userpwd: str
DCS VM password
"""
# builds endpoint
self.ipv6Endpoint = ipv6 + "%" + vmInterface
self.sshlib = SSHLibrary()
self.stdout = None
self.sshlib.open_connection(self.ipv6Endpoint)
self.sshlib.login(username=user, password=userpwd)
# sets API version
self.api_version = self.get_api_version()
logging.debug("The API Version utilized is {0}.".format(
self.api_version))
print(self.api_version)
# header information
self._header = "-H \"X-API-Version: {0}\" -H \"Content-Type: application/json\"".format(
self.api_version)
self._secure_header = None
def get_api_version(self):
"""
Helper method get_api_version
Gets the latest API version supported by the appliance.
On failure, falls back to API version 120.
Parameters
----------
none
"""
api_command = "curl --request GET https://localhost/rest/version"
apiversions, exit_code = self.sshlib.execute_command(
command=api_command, return_rc=True)
if exit_code == 0:
api_version = json.loads(apiversions)
return api_version["currentVersion"]
else:
logging.warning(
"The API Version utilized is 120 as get_api_version return exit code 1"
)
return "120"
def build_command(self, url, request_type, payload={}, *options):
"""
Helper method build_command
Creates the curl command, along with headers, for GET and POST calls to the appliance.
Parameters:
----------
url: str
URL location of the endpoint.
request_type: str
specifies the type of REST request. For instance, GET, POST.
payload: dict
data to be sent to the appliance, only applicable when making a post call.
*options: list of strings
any arguments that need to be concatenated with the curl command. For instance, "-i", "-s"
"""
url = "https://localhost" + url
if request_type == "GET":
command = "curl -X {0} {1} {2}".format(request_type, self._header,
url)
if self._secure_header != None:
command = "curl -X {0} {1} {2}".format(request_type,
self._secure_header,
url)
elif request_type == "POST":
payload = '{0}'.format(json.dumps(payload).replace("'", '"'))
command = 'curl -X {0} {1} -d \'{2}\' {3}'.format(
request_type, self._header, payload, url)
if self._secure_header != None:
command = 'curl -X {0} {1} -d \'{2}\' {3}'.format(
request_type, self._secure_header, payload, url)
if options:
option = ""
for op in options:
option = option + " " + op
command = "curl{0} -X {1} {2} -d '{3}' {4}".format(
option, request_type, self._header, payload, url)
if self._secure_header != None:
command = "curl{0} -X {1} {2} -d '{3}' {4}".format(
option, request_type, self._secure_header, payload, url)
logging.info('Executing URI {0} Request Type: {1}'.format(
url, request_type))
return command
def accept_eula_once(self, service_access="yes"):
"""
On initial communication with the appliance, the end user service agreement (EULA) must be accepted.
This only needs to occur once. Additional calls will not change the status of the EULA nor the status of the service access.
If a change to the service access is required, see the function change_service_access()
If the appliance returns an error status (anything outside of the 100 or 200 range), an error is raised.
No authentication on the appliance is required.
Parameters
----------
service_access (optional): str
"yes" will accept service access
"no" will not allow service access
empty value will default to "yes"
"""
url = '/rest/appliance/eula/status'
eula_command = self.build_command(url, "GET")
json_result, exit_code = self.sshlib.execute_command(eula_command,
return_rc=True)
if not json_result: # if False, eula acceptance has already occurred.
logging.warning('EULA does not need to be saved.')
if exit_code != 0 or json_result:
logging.debug(
'Call EULA Acceptance with enable service access={0}'.format(
service_access))
url = '/rest/appliance/eula/save'
payload = {"supportAccess": service_access}
save_eula_command = self.build_command(url, "POST", payload)
logging.warning(save_eula_command)
save_success, exit_code = self.sshlib.execute_command(
save_eula_command, return_rc=True)
if exit_code == 0:
logging.info('EULA Response {0}'.format(save_success))
else:
raise Exception('accept_eula failed. JSON Response {0}'.format(
json.dumps(save_success)))
def change_administrator_password(self):
"""
On initial logon, the administrator's password has to be changed from the default value.
The call to the administrator password change is attempted.
If the change administrator password call fails, then we attempt to login with the administrator password.
If successful, we log a message and the accurate administrator password.
If the administrator login is not successful, an error is raised.
The administrator data is pulled from the dictionary in this file. This needs to be moved to a more formal location.
Parameters
----------
none
"""
url = "/rest/users/changePassword"
payload = {
"userName": "Administrator",
"oldPassword": "admin",
"newPassword": "admin123"
}
change_pass_command = self.build_command(url, "POST", payload)
status, success = self.sshlib.execute_command(
command=change_pass_command, return_rc=True)
if success == 0:
logging.info('Administrator password change was accepted.')
else:
raise Exception(
'change_administrator_password failed. JSON Response: {0}'.
format(json.dumps(status)))
def get_secure_headers(self):
"""
Helper method to appliance_request().
Gives header information required by the appliance with authentication information.
Return
------
_secure_header: dict. Dictionary containing X-API-Version, Content-Type, and Auth. The Auth parameter value is a sessionID.
"""
# Once _secure_header is defined, we can use it over and over again for the duration of its life.
# Note, the header is only good for that user (administrator), 24 hours, and until the next reboot.
if self._secure_header != None:
return self._secure_header
payload = {"userName": "Administrator", "password": "admin123"}
url = '/rest/login-sessions'
authentication_command = self.build_command(url, "POST", payload)
status, exit_code = self.sshlib.execute_command(
command=authentication_command, return_rc=True)
if exit_code != 0:
raise Exception(
"There was an issue with the HTTP Call to get headers. Exception message: {0}"
.format(status))
try:
safe_json = json.loads(status)
self._secure_header = self._header
if 'sessionID' not in safe_json:
raise Exception(
'Auth token for the header is undefined. No Session ID available. Status: {0}.'
.format(status))
self._secure_header = self._header + ' -H "Auth: {0}"'.format(
safe_json['sessionID'])
return self._secure_header
except:
raise Exception(
'Failure to access the sessionID from the response. JSON: {0}'.
format(status))
def get_mac(self):
"""
Helper method get_mac
Used when creating the payload for setting the IP address of the OneView DCS appliance.
Returns the MAC address of the OneView DCS appliance.
Parameters:
----------
none
"""
url = "/rest/appliance/network-interfaces"
self.get_secure_headers()
mac_command = self.build_command(url, "GET")
data, exit_code = self.sshlib.execute_command(command=mac_command,
return_rc=True)
if exit_code != 0:
raise Exception(
'Failure to get mac address of the interface: {0}'.format(
data))
data = json.loads(data)
try:
return data["applianceNetworks"][0]["macAddress"]
except:
raise Exception('Failure to fetch macAddress from the response')
def change_ovDcs_ip(self, app1Ipv4Addr, app2Ipv4Addr, virtIpv4Addr,
ipv4Gateway, ipv4Subnet, ):
"""
Changes the Ip address of the oneview dcs appliance.
Parameters:
----------
app1Ipv4Addr: str
Node1 IPv4 address in a two-node cluster
app2Ipv4Addr: str
Node2 IPv4 address in a two-node cluster.
virtIpv4Addr: str
Virtual IPv4 address. Oneview dcs will be reachable from this IP.
ipv4Gateway: str
IPv4 gateway address.
ipv4Subnet: str
IPv4 subnet mask or CIDR bit count.
"""
url = "/rest/appliance/network-interfaces"
macAddress = self.get_mac()
payload = {
"applianceNetworks": [{
"activeNode": 1,
"app2Ipv4Addr": app2Ipv4Addr,
"app1Ipv4Addr": app1Ipv4Addr,
"confOneNode": True,
"hostname": "ThisIsAutomated.com",
"networkLabel": "Managed devices network",
"interfaceName": "Appliance",
"device": "eth0",
"ipv4Gateway": ipv4Gateway,
"ipv4Subnet": ipv4Subnet,
"ipv4Type": "STATIC",
"ipv6Type": "UNCONFIGURE",
"macAddress": macAddress,
"overrideIpv4DhcpDnsServers": False,
"unconfigure": False,
"slaacEnabled": "yes",
"virtIpv4Addr": virtIpv4Addr
}]
}
changeIp_command = self.build_command(url, "POST", payload, "-i")
data, exit_code = self.sshlib.execute_command(command=changeIp_command,
return_rc=True)
x = json.dumps(data)
time.sleep(2)
uri = search('Location: (.+?)\r\nCache-Control', x)
print(uri, x)
if uri != None:
task_uri = uri.group(1)
if (self.get_task(task_uri)):
logging.info("Oneview Ip is set to: {0}".format(virtIpv4Addr))
f = open('ipaddress.txt', 'w')
f.write(str(virtIpv4Addr))
return None
def get_task(self, uri):
"""Gets the task corresponding to a given task ID.
Will wait until the task is not completed.
No failure will rasie an exception.
On successful completion will return True
Parameters:
----------
uri: str
Uri of the task
"""
self.get_secure_headers()
task_command = self.build_command(uri, "GET")
data, exit_code = self.sshlib.execute_command(command=task_command,
return_rc=True)
if exit_code == 0:
task_data = json.loads(data)
while task_data["taskState"] == "Running":
logging.info("task \"{0}\" is running...".format(uri))
time.sleep(10)
data, exit_code = self.sshlib.execute_command(command=task_command,
return_rc=True)
task_data = json.loads(data)
if task_data["taskState"] == "Completed":
logging.info("task \"{0}\" completed".format(uri))
return True
else:
logging.warning(
"Unexpected failure. Task ended with state {0}, URI:{1}".
format(task_data["taskState"], uri))
return None
def search_task(self, param):
"""Gets all the tasks based upon filters provided. Note: filters are optional.
iterate through all the task collected and calls get_task() to check the status.
Used while running hardware discovery
Parameters:
----------
param: str
Filters for the finding the task uris. For example: ?filter="'name' = 'alertMax'"
filters are concatenated with the URI
"""
self.get_secure_headers()
uri = "/rest/tasks" + param
task_command = self.build_command(uri, "GET")
data, exit_code = self.sshlib.execute_command(command=task_command,
return_rc=True)
all_members = json.loads(data)
for i in all_members["members"]:
self.get_task(i["uri"])
def execute_command_in_dcs_and_verify(self, dcs_command, expected_output):
'''Execute the given Command in DCS and verify the response with Expected output.
Example
Execute Command In DCS And Verify | <dcs_command> | <expected_output> |
:param dcs_command: Command that need to be executed in DCS vm
:param expected_output: expected output from the DCS command executed
:raises AssertionError if output does not match with expected output
:return stdout: return response obtained after command execution
'''
logging.info("executing {0}".format(dcs_command))
self.stdout = self.sshlib.execute_command(dcs_command,
return_stdout=True)
if search(expected_output, self.stdout, IGNORECASE) is None:
raise AssertionError(
"DCS command output is not as expected: {} found: {}".format(
expected_output, self.stdout))
return self.stdout
def change_dcs_schematic(self, dcs_commands):
'''Changes DCS schematic to given schematic
Example
Change DCS Schematic | <dcs_commands> |
:param dcs_commands: DCS commands to be executed along with its expected output for changing the schematic
ex:[["dcs stop", "DCS is Stopped"]]
'''
for cmd in dcs_commands:
self.execute_command_in_dcs_and_verify(cmd[0], cmd[1])
time.sleep(60)
def dcs_hardware_setup(self):
'''Performs Hardware Setup in DCS appliance
Parameters:
none
'''
logging.info("executing appliance set up")
status, exit_code = self.sshlib.execute_command(
command=
"curl -i -s -o /dev/nul -I -w '%{http_code}\n' -X POST -H \"X-API-Version: "
+ str(self.api_version) +
"\" https://localhost/rest/appliance/tech-setup",
return_rc=True)
if exit_code != 0:
raise AssertionError(
"Failed to Invoke Sever Hardware discovery with status:{} and exit code:{}"
.format(status, exit_code))
elif status == "202":
self.search_task(
"?filter=\"'name'='Discover%20hardware'\"&sort=created:descending&count=1"
)
def dcs_network_configuration(self, app1Ipv4Addr, app2Ipv4Addr,
virtIpv4Addr, ipv4Gateway, ipv4Subnet):
"""Changes the passwordthe dcs appliance and sets new Ip of the appliamce.
Parameters:
app1Ipv4Addr: str
Node1 IPv4 address in a two-node cluster
app2Ipv4Addr: str
Node2 IPv4 address in a two-node cluster.
virtIpv4Addr: str
Virtual IPv4 address. Oneview dcs will be reachable from this IP.
ipv4Gateway: str
IPv4 gateway address.
ipv4Subnet: str
IPv4 subnet mask or CIDR bit count.
"""
self.accept_eula_once()
self.change_administrator_password()
self.change_ovDcs_ip(app1Ipv4Addr, app2Ipv4Addr, virtIpv4Addr,
ipv4Gateway, ipv4Subnet)
def dcs_schematic_configuration(self, dcs_commands):
'''Change DCS schematic then perform Hardware setup
:param dcs_commands: Sequence of DCS commands to be executed along with its expected output for changing the schematic
ex:[["dcs stop", "DCS is Stopped"]]
'''
# TODO: if the running schematic is already synergy_3encl_demo, skip this step
self.change_dcs_schematic(dcs_commands)
self.dcs_hardware_setup()
self.sshlib.close_connection()
dcs_commands = [
["dcs status", "dcs is running"],
["dcs stop", "dcs is stopped"],
["dcs status", "dcs is not running"],
["dcs start /dcs/schematic/synergy_3encl_demo cold", "DCS httpd daemon started"],
[
"dcs status",
"DCS is Running\n Schematic used: /dcs/schematic/synergy_3encl_demo",
],
]
def ping(hosts):
"""
Returns True if host (str) responds to a ping request.
"""
host=hosts.strip()
# operating_sys = platform.system().lower()
exit_code = os.system("ping6 "+host+"%"+interfaces[0]+" -c 5")
# print("ping6 "+hosts+"%"+interfaces[0]+" -c 5")
if exit_code == 0:
return True
return False
interfaces = list(filter(lambda x: "ens" in x, netifaces.interfaces()))
config = load_from_file("auto_config")["fts"]
if __name__ == "__main__":
if len(interfaces) > 0:
f=open("ipv6.txt")
ipv6=f.readline()
while ipv6:
if ping(ipv6):
ipv6=ipv6.strip()
dcs_inst = dcs(ipv6, interfaces[0],
config["dcs_username"], config["dcs_password"])
dcs_inst.dcs_network_configuration(
config["dcs_ipv4_1"],
config["dcs_ipv4_2"],
config["dcs_ipv4_3"],
config["gateway"],
config["subnet_mask"],)
dcs_inst.dcs_schematic_configuration(dcs_commands)
break
else:
ipv6=f.readline()
|
Srija-Papinwar/CD
|
scripts/dcs_fts.py
|
dcs_fts.py
|
py
| 20,662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "SSHLibrary.SSHLibrary",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 385,
"usage_type": "argument"
},
{
"api_name": "time.sleep",
"line_number": 400,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 408,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "netifaces.interfaces",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "auto_loader.load_from_file",
"line_number": 486,
"usage_type": "call"
}
] |
30366445741
|
from traits.api import Bool, Instance, Float, Property
# Local relative imports
from .abstract_mapper import AbstractMapper
from .data_range_1d import DataRange1D
class Base1DMapper(AbstractMapper):
"""Defines an abstract mapping from a 1-D region in input space to a 1-D
region in output space.
"""
#: The data-space bounds of the mapper.
range = Instance(DataRange1D)
#: The screen space position of the lower bound of the data space.
low_pos = Float(0.0)
#: The screen space position of the upper bound of the data space.
high_pos = Float(1.0)
#: Convenience property to get low and high positions in one structure.
#: Must be a tuple (low_pos, high_pos).
screen_bounds = Property
#: Should the mapper stretch the dataspace when its screen space bounds are
#: modified (default), or should it preserve the screen-to-data ratio and
#: resize the data bounds? If the latter, it will only try to preserve
#: the ratio if both screen and data space extents are non-zero.
stretch_data = Bool(True)
#: The sign of the mapping: 1 if deltas match sign, -1 if opposite sign
sign = Property
# If the subclass uses a cache, _cache_valid is maintained to
# monitor its status
_cache_valid = Bool(False, transient=True)
# Indicates whether or not the bounds have been set at all, or if they
# are at their initial default values.
_low_bound_initialized = Bool(False)
_high_bound_initialized = Bool(False)
# ------------------------------------------------------------------------
# Event handlers
# ------------------------------------------------------------------------
def _low_pos_changed(self, old, new):
self._cache_valid = False
if not self.stretch_data:
self._adjust_range((old, self.high_pos), (new, self.high_pos))
self._low_bound_initialized = True
self.updated = True
def _high_pos_changed(self, old, new):
self._cache_valid = False
if not self.stretch_data:
self._adjust_range((self.low_pos, old), (self.low_pos, new))
self._high_bound_initialized = True
self.updated = True
def _range_changed(self, old, new):
if old is not None:
old.observe(self._range_change_handler, "updated", remove=True)
if new is not None:
new.observe(self._range_change_handler, "updated")
self._cache_valid = False
self.updated = new
def _range_change_handler(self, event):
"Handles the range changing; dynamically attached to our ranges"
self._cache_valid = False
self.updated = event.object
def _get_screen_bounds(self):
return (self.low_pos, self.high_pos)
def _get_sign(self):
delta_screen = self.high_pos - self.low_pos
delta_data = self.range.high - self.range.low
if delta_screen == 0 or delta_data == 0:
return 0
elif delta_screen / float(delta_data) < 0:
return -1
else:
return 1
def _set_screen_bounds(self, new_bounds):
if new_bounds[0] == self.low_pos and new_bounds[1] == self.high_pos:
return
if not self.stretch_data:
self._adjust_range((self.low_pos, self.high_pos), new_bounds)
self.trait_setq(low_pos=new_bounds[0])
self.trait_setq(high_pos=new_bounds[1])
self._cache_valid = False
self._low_bound_initialized = True
self._high_bound_initialized = True
self.updated = True
def _adjust_range(self, old_bounds, new_bounds):
initialized = (
self._low_bound_initialized and self._high_bound_initialized
)
if self.range is not None and initialized:
rangelow = self.range.low
rangehigh = self.range.high
d_data = rangehigh - rangelow
old_d_screen = old_bounds[1] - old_bounds[0]
if d_data != 0 and old_d_screen != 0:
new_data_extent = (
d_data / old_d_screen * (new_bounds[1] - new_bounds[0])
)
self.range.set_bounds(rangelow, rangelow + new_data_extent)
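# Illustrative sketch (not part of this module): chaco's LinearMapper is the usual
# concrete subclass; the values below are assumptions for demonstration only.
#
#   from chaco.api import LinearMapper, DataRange1D
#   mapper = LinearMapper(range=DataRange1D(low=0.0, high=10.0))
#   mapper.screen_bounds = (0.0, 100.0)  # with stretch_data=True, data bounds stay fixed
#   # With stretch_data=False, _adjust_range rescales the data bounds instead,
#   # preserving the screen-to-data ratio.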
|
enthought/chaco
|
chaco/base_1d_mapper.py
|
base_1d_mapper.py
|
py
| 4,221 |
python
|
en
|
code
| 286 |
github-code
|
6
|
[
{
"api_name": "abstract_mapper.AbstractMapper",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "data_range_1d.DataRange1D",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "traits.api.Float",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "traits.api.Float",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "traits.api.Property",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "traits.api.Bool",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "traits.api.Property",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "traits.api.Bool",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "traits.api.Bool",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "traits.api.Bool",
"line_number": 42,
"usage_type": "call"
}
] |
27103152939
|
from itertools import product
import numpy as np
import pytest
from dcegm.pre_processing.params import process_params
from numpy.testing import assert_array_almost_equal as aaae
from scipy.special import roots_sh_legendre
from scipy.stats import norm
from toy_models.consumption_retirement_model.budget_functions import (
_calc_stochastic_income,
)
from toy_models.consumption_retirement_model.budget_functions import budget_constraint
model = ["deaton", "retirement_taste_shocks", "retirement_no_taste_shocks"]
labor_choice = [0, 1]
period = [0, 5, 7]
max_wealth = [11, 33, 50]
n_grid_points = [101, 444, 1000]
TEST_CASES = list(product(model, period, labor_choice, max_wealth, n_grid_points))
@pytest.mark.parametrize(
"model, period, labor_choice, max_wealth, n_grid_points", TEST_CASES
)
def test_get_beginning_of_period_wealth(
model, period, labor_choice, max_wealth, n_grid_points, load_example_model
):
params, options = load_example_model(f"{model}")
params = process_params(params)
sigma = params["sigma"]
r = params["interest_rate"]
consump_floor = params["consumption_floor"]
n_quad_points = options["quadrature_points_stochastic"]
child_state_dict = {"period": period, "lagged_choice": labor_choice}
savings_grid = np.linspace(0, max_wealth, n_grid_points)
_quad_points, _ = roots_sh_legendre(n_quad_points)
quad_points = norm.ppf(_quad_points) * sigma
random_saving_scalar = np.random.randint(0, n_grid_points)
random_shock_scalar = np.random.randint(0, n_quad_points)
wealth_beginning_of_period = budget_constraint(
**child_state_dict,
savings_end_of_previous_period=savings_grid[random_saving_scalar],
income_shock_previous_period=quad_points[random_shock_scalar],
options=options,
params=params,
)
_labor_income = _calc_stochastic_income(
**child_state_dict,
wage_shock=quad_points[random_shock_scalar],
min_age=options["min_age"],
constant=params["constant"],
exp=params["exp"],
exp_squared=params["exp_squared"],
)
budget_expected = (1 + r) * savings_grid[random_saving_scalar] + _labor_income
aaae(wealth_beginning_of_period, max(consump_floor, budget_expected))
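# The identity asserted above, spelled out: beginning-of-period wealth should equal
#   (1 + r) * savings + stochastic labor income,
# floored at the consumption floor whenever the budget falls below it.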
|
OpenSourceEconomics/dcegm
|
tests/test_budget_equation.py
|
test_budget_equation.py
|
py
| 2,269 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "itertools.product",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dcegm.pre_processing.params.process_params",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scipy.special.roots_sh_legendre",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm.ppf",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "scipy.stats.norm",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "numpy.random.randint",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "toy_models.consumption_retirement_model.budget_functions.budget_constraint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "toy_models.consumption_retirement_model.budget_functions._calc_stochastic_income",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.testing.assert_array_almost_equal",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 23,
"usage_type": "attribute"
}
] |
40546336482
|
import dlib
import os
import numpy as np
import matplotlib.pyplot as plt
"""
此文件为正向人脸检测模块,采用dlib实现
"""
def _shape_to_np(shape):
xy = []
for i in range(68):
xy.append((shape.part(i).x, shape.part(i).y,))
xy = np.asarray(xy, dtype='float32')
return xy
def get_landmarks(img, detector, predictor, PlotOn=False):
"""
获取人脸特征点
"""
lmarks = []
dets, scores, idx = detector.run(img, 1)
# dets = [dlib.rectangle(left=0, top=0, right=img.shape[1], bottom=img.shape[0])]
print("Number of faces detected: {}".format(len(dets)))
if len(dets) > 0:
shapes = []
for k, det in enumerate(dets):
shape = predictor(img, det)
shapes.append(shape)
xy = _shape_to_np(shape)
lmarks.append(xy)
lmarks = np.asarray(lmarks, dtype='float32')
lmarks = lmarks[0, :, :].T
if PlotOn:
display_landmarks(img, lmarks)
return lmarks
else:
return lmarks
def display_landmarks(img, lmarks):
for i in range(68):
xy = lmarks[:, i]
plt.plot(xy[0], xy[1], 'ro')
plt.text(xy[0], xy[1], str(i))
plt.imshow(img)
plt.show()
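# Minimal usage sketch (the model path is an assumption; download dlib's 68-point
# shape predictor separately):
#
#   import cv2
#   detector = dlib.get_frontal_face_detector()
#   predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#   img = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2RGB)
#   lmarks = get_landmarks(img, detector, predictor, PlotOn=True)  # shape (2, 68)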
|
hamster1963/face-all-in-one-machine-backend
|
face_irobot_main/facial_feature_detector.py
|
facial_feature_detector.py
|
py
| 1,246 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.asarray",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
}
] |
2696828667
|
from collections import Counter
from trava.ext.boosting_eval.boosting_logic import CommonBoostingEvalLogic
from trava.ext.boosting_eval.eval_steps import EvalFitSteps
from trava.fit_predictor import FitPredictConfig, FitPredictConfigUpdateStep, FitPredictorSteps
from trava.split.result import SplitResult
from trava.tracker import Tracker
class _GroupConfigUpdateStep(FitPredictConfigUpdateStep):
def __init__(self, group_col_name: str):
self._group_col_name = group_col_name
def fit_split_data(self, raw_split_data: SplitResult, config: FitPredictConfig, tracker: Tracker) -> SplitResult:
X_valid = None
if raw_split_data.X_valid is not None:
X_valid = raw_split_data.X_valid.drop(self._group_col_name, axis=1)
result = SplitResult(
X_train=raw_split_data.X_train.drop(self._group_col_name, axis=1),
y_train=raw_split_data.y_train,
X_test=raw_split_data.X_test.drop(self._group_col_name, axis=1),
y_test=raw_split_data.y_test,
X_valid=X_valid,
y_valid=raw_split_data.y_valid,
)
return result
def fit_params(
self, fit_params: dict, fit_split_data: SplitResult, config: FitPredictConfig, tracker: Tracker
) -> dict:
raw_split_data = config.raw_split_data
assert raw_split_data
train_counted_groups = self._counted_groups(X=raw_split_data.X_train)
fit_params["group"] = train_counted_groups
return fit_params
def _counted_groups(self, X):
train_groups = X[self._group_col_name].values
counted_groups = list(Counter(train_groups).values())
return counted_groups
class _GroupEvalConfigUpdateStep(_GroupConfigUpdateStep):
def __init__(self, group_col_name: str):
super().__init__(group_col_name=group_col_name)
def fit_params(
self, fit_params: dict, fit_split_data: SplitResult, config: FitPredictConfig, tracker: Tracker
) -> dict:
fit_params = super().fit_params(
fit_params=fit_params, fit_split_data=fit_split_data, config=config, tracker=tracker
)
raw_split_data = config.raw_split_data
assert raw_split_data
assert raw_split_data.X_valid is not None, "X_valid set must be present to run evaluation"
eval_counted_groups = self._counted_groups(X=raw_split_data.X_valid)
fit_params["eval_group"] = [fit_params["group"], eval_counted_groups]
return fit_params
def _counted_groups(self, X):
train_groups = X[self._group_col_name].values
counted_groups = list(Counter(train_groups).values())
return counted_groups
class GroupFitSteps(FitPredictorSteps):
"""
Simple extension for problems that are based on groups ( e.g. ranking )
that provides group parameter for training a model.
Init parameters
----------
group_col_name: str
Which column is used to store groups
"""
def __init__(self, group_col_name: str):
group_config_step = _GroupConfigUpdateStep(group_col_name=group_col_name)
super().__init__(config_steps=[group_config_step])
class GroupEvalFitSteps(EvalFitSteps):
"""
Same as GroupFitSteps, but also adds some modifications to support evaluation.
Init parameters
----------
eval_logic: Eval
Contains logic of how to perform evaluation on the model.
group_col_name: str
Which column is used to store groups
"""
def __init__(self, eval_logic: CommonBoostingEvalLogic, group_col_name: str):
group_eval_config_step = _GroupEvalConfigUpdateStep(group_col_name=group_col_name)
super().__init__(eval_logic=eval_logic)
self.config_steps.insert(0, group_eval_config_step)
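# Usage sketch (the column name and surrounding wiring are assumptions): ranking
# models such as LightGBM's ranker expect `group`/`eval_group` fit parameters, which
# these steps derive by counting rows per value of the group column, e.g.
#
#   steps = GroupFitSteps(group_col_name="query_id")
#
# and then pass `steps` to your trava fit-predictor configuration.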
|
ityutin/trava
|
trava/ext/grouped/group_steps.py
|
group_steps.py
|
py
| 3,781 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "trava.fit_predictor.FitPredictConfigUpdateStep",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "trava.split.result.SplitResult",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "trava.fit_predictor.FitPredictConfig",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "trava.tracker.Tracker",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "trava.split.result.SplitResult",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "trava.split.result.SplitResult",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "trava.fit_predictor.FitPredictConfig",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "trava.tracker.Tracker",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "trava.split.result.SplitResult",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "trava.fit_predictor.FitPredictConfig",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "trava.tracker.Tracker",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "collections.Counter",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "trava.fit_predictor.FitPredictorSteps",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "trava.ext.boosting_eval.eval_steps.EvalFitSteps",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "trava.ext.boosting_eval.boosting_logic.CommonBoostingEvalLogic",
"line_number": 100,
"usage_type": "name"
}
] |
30031301327
|
import json
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
def read_details(pd_details):
"""[summary]
Args:
pd_details ([type]): [description]
Returns:
[type]: [description]
"""
with open(pd_details) as f:
data = json.load(f)
return data
def update_annot(ind, nodelist, pos, data, annot, G):
"""[summary]
Args:
ind ([type]): [description]
nodelist ([type]): [description]
pos ([type]): [description]
data ([type]): [description]
annot ([type]): [description]
G ([type]): [description]
"""
node_idx = ind["ind"][0]
node = list(nodelist)[node_idx]
xy = pos[node]
annot.xy = xy
node_attr = {"ID": node}
node_attr.update(G.nodes[node])
all_details = data[node]
patient_string = "Patient: {} , {}, {}".format(
"Ramesh", all_details["pBgrp"], all_details["pAge"]
)
donor_string = "Donor: {} , {}, {}".format(
"arun", all_details["dBgrp"], all_details["dAge"]
)
text = "\n".join([patient_string, donor_string])
annot.set_text(text)
return
def hover(
event, annot, nodes1, nodes2, nodes3, nodes4, top_nodes, rest, pos, data, fig, ax, G
):
"""[summary]
Args:
event ([type]): [description]
annot ([type]): [description]
nodes1 ([type]): [description]
nodes2 ([type]): [description]
nodes3 ([type]): [description]
nodes4 ([type]): [description]
top_nodes ([type]): [description]
rest ([type]): [description]
pos ([type]): [description]
data ([type]): [description]
fig ([type]): [description]
ax ([type]): [description]
G ([type]): [description]
"""
vis = annot.get_visible()
if event.inaxes == ax:
if nodes1 is not None:
cont1, ind1 = nodes1.contains(event)
cont2, ind2 = nodes2.contains(event)
else:
cont1, cont2 = False, False
if nodes3 is not None:
cont3, ind3 = nodes3.contains(event)
cont4, ind4 = nodes4.contains(event)
else:
cont3, cont4 = False, False
if cont1:
update_annot(ind1, top_nodes, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
elif cont2:
update_annot(ind2, top_nodes, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
elif cont3:
update_annot(ind3, rest, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
elif cont4:
update_annot(ind4, rest, pos, data, annot, G)
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
def hover_graph(G, cycles, solution_values, weight, pd_details):
"""
G : networkx graph object with all nodes, but only solution edges
cycles : list -> all possible cycles in G
solution : list -> 1 if corresponding cycle is chosen for final solution else 0
weight : dict -> keys: edges, values: edgeweights
pd_details : string -> path to JSON file (dump) with patient donor details
"""
fig, ax = plt.subplots()
pos = graphviz_layout(G)
data = read_details(pd_details)
rest = []
two_cycle_nodes_top = {}
two_cycle_nodes_bottom = {}
top_edges = []
bottom_edges = []
colour1 = "orange"
colour2 = "purple"
for i, cycle in enumerate(cycles):
if len(cycle) == 3 and solution_values[i] == 1:
### selects chosen 2 cycles and colours the top and bottom halves of the two nodes in an opposite
### manner to signify corresponding PD pairs
two_cycle_nodes_top[cycle[0]] = colour1
two_cycle_nodes_bottom[cycle[0]] = colour2
two_cycle_nodes_top[cycle[1]] = colour2
two_cycle_nodes_bottom[cycle[1]] = colour1
top_edges.append((cycle[0], cycle[1]))
bottom_edges.append((cycle[1], cycle[0]))
pos = graphviz_layout(G)
# drawing two cycle nodes
top_nodes, top_colours = two_cycle_nodes_top.keys(), two_cycle_nodes_top.values()
bottom_nodes, bottom_colours = (
two_cycle_nodes_bottom.keys(),
two_cycle_nodes_bottom.values(),
)
# nodes other than those part of two cycles, including ones that are not part of any solution cycle
rest = [n for n in G.nodes() if n not in top_nodes]
""" nodes1 : top half of two cycle nodes
nodes2 : bottom half of two cycle nodes
nodes3 : top half of remaining nodes
nodes4 : bottom half of remaining nodes
"""
nodes1 = nx.draw_networkx_nodes(
G,
pos,
nodelist=top_nodes,
node_color=top_colours,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="top"),
label="P",
)
nodes2 = nx.draw_networkx_nodes(
G,
pos,
nodelist=bottom_nodes,
node_color=bottom_colours,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="bottom"),
label="D",
)
# drawing remaining nodes
nodes3 = nx.draw_networkx_nodes(
G,
pos,
nodelist=rest,
label="P",
node_color=colour1,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="top"),
)
nodes4 = nx.draw_networkx_nodes(
G,
pos,
nodelist=rest,
node_color=colour2,
node_size=600,
node_shape=matplotlib.markers.MarkerStyle(marker="o", fillstyle="bottom"),
)
"""
Networkx by default draws straight arcs and places edge labels on the middle of those arcs.
However, we draw curved arcs but edge labels still remain at their default position (midpoint of NodeA and NodeB) {inside the cycle}
Thus we need to offset this by supplying new positions. To maintain consistency across all scales of X and Y axis,
and positions of nodes we take the offset as 0.3 times difference between x-coordinates of the two nodes between which
the edge is drawn. Different offsets are required for top edge and bottom edge of two cycles. For three cycles, the default
placement causes no issue.
"""
pos_higher, pos_lower = {}, {}
# calculating offset
if not top_edges:
y_off = 20
else:
a, b = top_edges[0]
y_off = 0.3 * abs(pos[a][0] - pos[b][0])
for k, v in pos.items():
pos_higher[k] = (v[0], v[1] + y_off)
for k, v in pos.items():
pos_lower[k] = (v[0], v[1] - y_off)
"""
w_top : edge weights of top edges of two cycles
w_bottom : edge weights of bottom edges of two cycles
w_rest : edge weights of remaining edges which can be placed in their default location
"""
w_top = {e: str(weight[e]) for e in weight if (e in top_edges and e in G.edges())}
w_bottom = {
e: str(weight[e]) for e in weight if (e in bottom_edges and e in G.edges())
}
w_rest = {
e: str(weight[e])
for e in weight
if (e in G.edges() and e not in top_edges and e not in bottom_edges)
}
### Drawing edge labels
nx.draw_networkx_edges(
G, pos, edgelist=G.edges(), connectionstyle="arc3,rad=0.2", arrowsize=20
)
nx.draw_networkx_edge_labels(
G, pos_higher, edge_labels=w_top, label_pos=0.5, verticalalignment="top"
)
nx.draw_networkx_edge_labels(
G, pos_lower, edge_labels=w_bottom, label_pos=0.5, verticalalignment="bottom"
)
nx.draw_networkx_edge_labels(G, pos, edge_labels=w_rest, label_pos=0.5)
# =================== HOVERING =========================
### setting annotation style
annot = ax.annotate(
"",
xy=(0, 0),
xytext=(20, 20),
textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"),
)
annot.set_visible(False)
idx_to_node_dict = {idx: node for idx, node in enumerate(G.nodes)}
fig.canvas.mpl_connect(
"motion_notify_event",
lambda event: hover(
event,
annot,
nodes1,
nodes2,
nodes3,
nodes4,
top_nodes,
rest,
pos,
data,
fig,
ax,
G,
),
)
    # Save before showing: once the interactive window closes, the figure is
    # cleared and savefig would write a blank SVG.
    plt.savefig("./result/output.svg", format="svg")
    plt.show()
|
siv2r/kidney-exchange
|
global_match/hovering.py
|
hovering.py
|
py
| 8,711 |
python
|
en
|
code
| 45 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "networkx.drawing.nx_agraph.graphviz_layout",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "networkx.drawing.nx_agraph.graphviz_layout",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_nodes",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers.MarkerStyle",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "networkx.draw_networkx_nodes",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers.MarkerStyle",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "networkx.draw_networkx_nodes",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers.MarkerStyle",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "networkx.draw_networkx_nodes",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers.MarkerStyle",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "matplotlib.markers",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "networkx.draw_networkx_edges",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_edge_labels",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_edge_labels",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_edge_labels",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 269,
"usage_type": "name"
}
] |
24270720132
|
import random
import os
import glob
import cv2
import numpy as np
import json
from detectron2.structures import BoxMode
import itertools
import sys
# import some common detectron2 utilities
import pdb
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
class_list = ['cone','duckie','duckiebot']
"""Now, let's fine-tune a coco-pretrained R50-FPN Mask R-CNN model on the balloon dataset. It takes ~6 minutes to train 300 iterations on Colab's K80 GPU."""
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
cfg = get_cfg()
cfg.merge_from_file("/network/home/bhattdha/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
class_list = ['cone','duckie','duckiebot']
# write a function that loads the dataset into detectron2's standard format
def get_duckietown_dicts(root_dir):
annotation_file = root_dir + 'annotations/final_anns.json'
frame_path = root_dir + 'final_frames/frames/'
with open(annotation_file) as f:
data = json.load(f)
record = {}
dataset_dicts = []
class_label = {}
## giving labels to the classes
for idx,class_val in enumerate(class_list):
class_label[class_val] = idx
for name in data.keys():
# print(name)
image_name = frame_path + name
record = {}
height, width = cv2.imread(image_name).shape[:2]
record["file_name"] = image_name
record["height"] = height
record["width"] = width
objs = []
for annotation in data[name]:
ob_list = []
obj_ann = {
"bbox": [annotation['bbox'][0], annotation['bbox'][1], annotation['bbox'][0] + annotation['bbox'][2], annotation['bbox'][1] + annotation['bbox'][3]],
"bbox_mode": BoxMode.XYXY_ABS,
"category_id": annotation['cat_id'] - 1,
"iscrowd": 0
}
objs.append(obj_ann)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
from detectron2.data import DatasetCatalog, MetadataCatalog
root_dir = '/network/tmp1/bhattdha/duckietown_dataset/'
for d in ["train", "test"]:
DatasetCatalog.register("duckietown/" + d, lambda d=d: get_duckietown_dicts(root_dir))
MetadataCatalog.get('duckietown/' + d).set(thing_classes=class_list)
duckietown_metadata = MetadataCatalog.get('duckietown/train')
cfg_load = torch.load('/network/tmp1/bhattdha/duckietown_dataset/probabilistic_duckietown_OD/probabilistic_duckietown_OD_cfg.final')
##loading the config used at train time
cfg = cfg_load['cfg']
# import pdb; pdb.set_trace()
# cfg.DATASETS.TEST = () # no metrics implemented for this dataset
cfg.DATASETS.TEST = ('coco_2017_val',) # no metrics implemented for this dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(class_list) # (kitti)
cfg.OUTPUT_DIR = '/network/tmp1/bhattdha/duckietown_dataset/probabilistic_duckietown_OD/'
# cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(class_list) # (kitti)
"""Now, we perform inference with the trained model on the kitti dataset. First, let's create a predictor using the model we just trained:"""
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_0014999.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set the testing threshold for this model
# cfg.DATASETS.TEST = ("kitti/test", )
predictor = DefaultPredictor(cfg)
"""Then, we randomly select several samples to visualize the prediction results."""
from detectron2.utils.visualizer import ColorMode
# im = cv2.imread('test.png')
# outputs = predictor(im)
# v = Visualizer(im[:, :, ::-1],
# metadata=duckietown_metadata,
# scale=1.0,
# instance_mode=ColorMode.IMAGE
# )
# v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# cv2.imwrite("test_out.png", v.get_image()[:, :, ::-1])
# import pdb; pdb.set_trace()
# import time
# inf_time = []
# # If the input is the camera, pass 0 instead of the video file name
# cap = cv2.VideoCapture('/network/home/bhattdha/manfred_vid.mov')
# frame_width = int(cap.get(3))
# frame_height = int(cap.get(4))
# out = cv2.VideoWriter('/network/home/bhattdha/output_prob.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 20, (frame_width,frame_height))
# while(cap.isOpened()):
# ret, frame = cap.read()
# st_time = time.time()
# outputs = predictor(frame)
# end_time = time.time() - st_time
# inf_time.append(time.time() - st_time)
# # pdb.set_trace()
# v = Visualizer(frame[:, :, ::-1],
# metadata=duckietown_metadata,
# scale=1.0,
# instance_mode=ColorMode.IMAGE
# )
# # out.write(frame)
# v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
# print("Tot time is: ", end_time)
# # print(type(v))
# # import ipdb; ipdb.set_trace()
# out.write(v.get_image()[:, :, ::-1])
# # When everything done, release the video capture and video write objects
# cap.release()
# out.release()
# print("Inference time: ", np.mean(np.array(inf_time)))
# dataset_dicts = get_kitti_dicts("/network/tmp1/bhattdha/kitti_dataset", 'test')
image_names = glob.glob("/network/tmp1/bhattdha/duckietown_dataset/final_frames/test/*.png")
for idx, im_name in enumerate(image_names):
print(idx, im_name)
im = cv2.imread(im_name)
outputs = predictor(im)
# pdb.set_trace()
v = Visualizer(im[:, :, ::-1],
metadata=duckietown_metadata,
scale=1.0,
instance_mode=ColorMode.IMAGE
)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
print("saving images")
print(type(v))
cv2.imwrite("/network/tmp1/bhattdha/duckietown_dataset/probabilistic_duckietown_OD/test_outputs/" + str(idx).zfill(5) + '.png', v.get_image()[:, :, ::-1])
# cv2_imshow(v.get_image()[:, :, ::-1])
|
dhaivat1729/detectron2_CL
|
experiments/test_duckietown_detectron.py
|
test_duckietown_detectron.py
|
py
| 6,268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "detectron2.config.get_cfg",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "detectron2.structures.BoxMode.XYXY_ABS",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "detectron2.structures.BoxMode",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "detectron2.data.DatasetCatalog.register",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "detectron2.data.DatasetCatalog",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "detectron2.data.MetadataCatalog.get",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "detectron2.data.MetadataCatalog",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "detectron2.data.MetadataCatalog.get",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "detectron2.data.MetadataCatalog",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "detectron2.engine.DefaultPredictor",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "detectron2.utils.visualizer.Visualizer",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "detectron2.utils.visualizer.ColorMode.IMAGE",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "detectron2.utils.visualizer.ColorMode",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "cv2.imwrite",
"line_number": 178,
"usage_type": "call"
}
] |
8167072903
|
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import regularizers
import numpy as np
import pandas as pd
import math as math
import sys
import os
import keras
from keras.models import load_model
from keras.layers import Dropout , Flatten
from keras.layers import BatchNormalization
from keras.preprocessing.text import text_to_word_sequence
from keras.preprocessing.text import one_hot
import string
from keras.layers import MaxPooling1D
from keras.layers import Flatten
from keras.layers import ConvLSTM2D
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import LSTM,GRU,TimeDistributed
from keras.layers import Dense
from keras.layers.embeddings import Embedding
from gensim.models.word2vec import Word2Vec
def normal_string(string):
if not string:
return ""
if len(string) <= 2:
return string
if len(string) > 2 and string[0] == string[1] and string[1] == string[2]:
return normal_string(string[1:])
return string[0] + normal_string(string[1:])
def remove_space(text):
index_list = [i for i, letter in enumerate(text) if letter == '\'']
remove_list = []
for i in range(0,len(index_list)):
if index_list[i]-1 >= 0 and text[index_list[i]-1] == ' ':
remove_list.append(index_list[i]-1)
if index_list[i]+1 < len(text) and text[index_list[i]+1] == ' ':
remove_list.append(index_list[i]+1)
#remove_list.append(index_list[i])
text = "".join([char for idx, char in enumerate(text) if idx not in remove_list])
return text
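# Illustrative behaviour of the two helpers above (hypothetical inputs):
#   normal_string("coooool") -> "cool"   # runs of 3+ identical chars collapse to 2
#   remove_space("don ' t")  -> "don't"  # spaces adjacent to apostrophes are removed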
mode = sys.argv[3]
test_data_filename = sys.argv[1]
t_lines = [line.rstrip('\n') for line in open(test_data_filename,'r' , errors='replace' , encoding='utf-8')]
t_lines = t_lines[1:]
for i in range(0,len(t_lines)):
num = len(str(i))
t_lines[i] = t_lines[i][num+1:]
w2v_t_lines = []
for i in range(0, len(t_lines)):
t_lines[i] = remove_space(t_lines[i])
tk = text_to_word_sequence(t_lines[i], filters='', lower=True, split=' ')
tmp_line = []
tmp = ""
for j in range(0,len(tk)):
tk[j] = tk[j].encode("ascii", errors="ignore").decode()
tk[j] = normal_string(tk[j])
tmp_line.append(tk[j])
tmp = tmp + tk[j] + " "
t_lines[i] = tmp
w2v_t_lines.append(tmp_line)
model = Word2Vec.load("gensim_w2v_0.82693_0602_model")
word_vectors = model.wv
vocab = []
for k, v in word_vectors.vocab.items():
vocab.append( (k,v.index) )
vocab = sorted(vocab , key=lambda x:x[1])
word_index_dict = {}
for i in range(0,len(vocab)):
word = vocab[i][0]
word_index_dict[word] = i+1
word_index_dict["unknown_word"] = len(vocab)+1
test_ind = []
for i in range(len(w2v_t_lines)):
tmp = []
for w in w2v_t_lines[i]:
if w not in word_index_dict:
tmp.append(word_index_dict["unknown_word"])
else:
tmp.append(word_index_dict[w])
test_ind.append(tmp)
rnn_model = load_model("0602_gensim_0.82693.h5")
test = sequence.pad_sequences(test_ind, maxlen=33)
p = 0.0
p += rnn_model.predict(test)
ans_filename = sys.argv[2]
ans_file = open(ans_filename , 'w')
ans_file.write("id,label\n")
for i in range(0,len(p)):
ans_file.write(str(i))
ans_file.write(',')
if p[i][0] >= 0.5:
ans_file.write('1')
else:
ans_file.write('0')
ans_file.write('\n')
|
muachilin/Machine-Learning
|
hw5/hw5_test.py
|
hw5_test.py
|
py
| 3,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "keras.preprocessing.text.text_to_word_sequence",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "gensim.models.word2vec.Word2Vec.load",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "gensim.models.word2vec.Word2Vec",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "keras.models.load_model",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.sequence.pad_sequences",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.sequence",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 102,
"usage_type": "attribute"
}
] |
31975617255
|
from django import template
from ..models import Page
register = template.Library()
@register.simple_tag
def main_menu():
"Query top-level pages"
return Page.objects.with_tree_fields().filter(
parent=None, is_active=True)
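# Template usage sketch (the markup and Page attributes are assumptions):
#   {% load menus %}
#   {% main_menu as pages %}
#   <nav>{% for page in pages %}<a href="{{ page.get_absolute_url }}">{{ page.title }}</a>{% endfor %}</nav>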
|
dnknth/feincms-demo
|
pages/templatetags/menus.py
|
menus.py
|
py
| 240 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.template.Library",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "models.Page.objects.with_tree_fields",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Page.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.Page",
"line_number": 9,
"usage_type": "name"
}
] |
23552963573
|
#########################################################################
# File Name: getKmerFromVCF_REF.py
# Author: yanbo
# mail: [email protected]
# Created Time: Thu 09 May 2019 10:45:06 AEST
#########################################################################
#!/usr/bin/env python3
import collections
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import re
import sys
import tools
import read
def write_pair_kmer(outFile, kmers):
sortedKmers = sorted(kmers)
with open(outFile, "w") as f:
for (kmer1, kmer2, pos) in sortedKmers:
#f.write("%s %s %s %s %s %s %s %s\n" % (ele[0], ele[1], ele[2], ele[3], ele[4], ele[5], tools.reverse(ele[0]), tools.reverse(ele[1]) ) )
f.write("%s %s %s\n" % (kmer1, kmer2, pos) )
def get_snp_pair_kmer(vcfFilename):
snps = read.read_vcf(vcfFilename)
kmerFilename="chr" + sys.argv[1] + ".snp.real." + sys.argv[2] + "mer"
kmers = []
for key in snps:
#assert seq[key-1] == snps[key][0] or seq[key-1] == snps[key][1]
assert seq[key-1] == snps[key][0]
h1 = seq[key-int(k/2)-1 : key-1] + snps[key][0] + seq[key : key+int(k/2) ] # 0
h2 = seq[key-int(k/2)-1 : key-1] + snps[key][1] + seq[key : key+int(k/2) ] # 1
if h1.count('N') > 0 or h2.count('N') > 0:
continue
'''
new_h1 = tools.reverse(h1) # 0
new_h2 = tools.reverse(h2) # 1
min_h= min(h1,h2)
min_newh = min(new_h1, new_h2)
ID = snps[key][2]
if min_h < min_newh:
if h1 < h2:
kmers.append( (h1, h2, key, ID, 0, 1) )
else:
kmers.append( (h2, h1, key, ID, 1, 0) )
else:
if new_h1 < new_h2:
kmers.append( (new_h1, new_h2, key, ID, 0, 1) )
else:
kmers.append( (new_h2, new_h1, key, ID, 1, 0) )
'''
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
kmers.append( (smallerH1, smallerH2, key) )
write_pair_kmer(kmerFilename, kmers)
def get_indel_pair_kmer(vcfFilename):
indels = read.read_vcf(vcfFilename)
kmerFilename="chr" + sys.argv[1] + ".indel.real." + sys.argv[2] + "mer"
kmers = []
indel_length1_cnt = 0
for key in indels:
s1, s2, ID = indels[key]
lenS1, lenS2 = len(s1), len(s2)
if lenS1 + lenS2 > 3:
continue
indel_length1_cnt += 1
assert lenS1 + lenS2 >= 2
if len(s1) == 1 and len(s2) == 2:
assert seq[key-1] == s1
assert s2[0] != s2[1]
#while s2[1] == seq[key-1]: # delete content is s2[1]
#key = key-1 # delete happen at "AAA" region, always think delete first poisition
h1 = seq[key-int(k/2) : key+int(k/2)] # k-1
h2 = seq[key-int(k/2) : key-1] + s2 + seq[key : key+int(k/2)] # 1 # len: k
assert len(h1) == k-1 and len(h2) == k
h1, h2 = h2, h1 # h1 always is longer one
initialH1 = h1
if h1.count('N') > 0 or h2.count('N') > 0:
continue
#print key, "11", h1
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
kmers.append( (smallerH1, smallerH2, key) )
# delete happen at multipe "AAAA" region, more pair kmer happen
l = len(h1)
mid = l/2
i=1
while mid+i<l and initialH1[mid+i] == initialH1[mid]:
h1 = seq[key-int(k/2)+i : key+int(k/2)+i] # move right i
h2 = seq[key-int(k/2)+i : key-1] + s2 + seq[key : key+int(k/2)+i] # move right i
h1, h2 = h2, h1 # h1 always is longer one
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "aa"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
i=1
while mid-i>=0 and initialH1[mid-i] == initialH1[mid]:
h1 = seq[key-int(k/2)-i : key+int(k/2)-i] # move left i
h2 = seq[key-int(k/2)-i : key-1] + s2 + seq[key : key+int(k/2)-i] # move right i
h1, h2 = h2, h1 # h1 always is longer one
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "bb"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
''' # for test can grouth-truth can always keep min strand delete first
if h1 > tools.reverse(h1):
print "aa"
print h1, h2
print tools.reverse(h1), tools.reverse(h2)
while s2[1] == seq[key]:
key+=1
h1 = seq[key-int(k/2) : key+int(k/2)] # k-1
h2 = seq[key-int(k/2) : key] + s2[1] + seq[key : key+int(k/2)] # 1 # len: k
h1, h2 = h2, h1
print h1, h2
print tools.reverse(h1), tools.reverse(h2)
'''
elif len(s1) == 2 and len(s2) == 1:
assert seq[key-1:key+1] == s1
assert s1[0] != s1[1]
h1 = seq[key-int(k/2) : key+int(k/2)+1] # k
h2 = seq[key-int(k/2) : key] + seq[key+1 : key+int(k/2)+1] # 1 # len: k-1
assert len(h1) == k and len(h2) == k-1
initialH1 = h1
if h1.count('N') > 0 or h2.count('N') > 0:
continue
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "22"
kmers.append( (smallerH1, smallerH2, key) )
l = len(h1)
            mid = l // 2  # integer midpoint; l/2 is a float in Python 3 and breaks indexing
i=1
while initialH1[mid+i] == initialH1[mid]:
h1 = seq[key-int(k/2)+i : key+int(k/2)+1+i] # k
h2 = seq[key-int(k/2)+i : key] + seq[key+1 : key+int(k/2)+1+i] # 1 # len: k-1
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "cc"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
i=1
while initialH1[mid-i] == initialH1[mid]:
h1 = seq[key-int(k/2)-i : key+int(k/2)+1-i] # k
h2 = seq[key-int(k/2)-i : key] + seq[key+1 : key+int(k/2)+1-i] # 1 # len: k-1
smallerH1, smallerH2 = tools.get_smaller_pair_kmer(h1, h2)
#print key, "dd"
kmers.append( (smallerH1, smallerH2, key) )
i+=1
print ("there are ", indel_length1_cnt, "indels, create ", len(kmers), "indel pair kmer")
write_pair_kmer(kmerFilename, kmers)
'''
allFile = "chr" + sys.argv[1] + ".all." + sys.argv[2] + "mer"
foutAll = open(allFile, "w")
for i in range(seqLen-21):
mer = seq[i:i+k]
if mer.count('N') > 0:
continue
Rmer = tools.reverse(mer)
if Rmer < mer:
mer = Rmer
foutAll.write("%s %s\n" % (mer, i))
foutAll.close()
'''
# this simulate data is based on hg18
refFilename="/home/yulin/bio/Data/reference/NCBI36_hg18/chr22.fa"
snpVCFFile="/home/yulin/bio/VariationCalling/data/NA12878/VCF/NA12878_hg18_snp_VCFs/chr22.vcf"
indelVCFFile="/home/yulin/bio/VariationCalling/data/NA12878/VCF/NA12878_hg18_indel_VCFs/chr22.vcf"
# this illumina data align to hg19
#print ("input chrID kmer-size")
#refFilename ="/home/yulin/bio/Data/reference/GRCh37_hg19/chr" + sys.argv[1] + ".fa"
#vcfFilename ="/home/yulin/software/HapCUT2/reproduce_hapcut2_paper/run_hapcut2_fosmid/data/NA12878_hg19_VCFs/chr" + sys.argv[1] + ".phased.vcf"
record = SeqIO.read(open(refFilename), "fasta")
print (record.id)
seq = str(record.seq).upper()
seqLen = len(seq)
k=int(sys.argv[2])
get_snp_pair_kmer(snpVCFFile)
get_indel_pair_kmer(indelVCFFile)
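# Run sketch (arguments inferred from the sys.argv usage above):
#   python getKmerFromVCF_REF.py <chrID> <kmer-size>
# e.g. `python getKmerFromVCF_REF.py 22 31` writes chr22.snp.real.31mer and
# chr22.indel.real.31mer to the working directory.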
|
yanboANU/VariationCalling
|
libprism/evaluate/getKmerFromVCF_REF.py
|
getKmerFromVCF_REF.py
|
py
| 7,794 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "read.read_vcf",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "read.read_vcf",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "tools.get_smaller_pair_kmer",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO.read",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 192,
"usage_type": "attribute"
}
] |
4991495509
|
#!/usr/bin/python3
# enable debugging
import cgi, cgitb
import json
import requests
import responses
cgitb.enable()
class Expense:
def __init__(self, exp_name,exp_date,exp_amount,exp_type):
self.name = exp_name
self.date = exp_date
self.amount = exp_amount
self.type = exp_type
form = cgi.FieldStorage()
exp_name = form.getvalue('exp_name')
exp_date = form.getvalue('exp_date')
exp_amount = form.getvalue('exp_amount')
exp_type = form.getvalue('exp_type')
expense = Expense(exp_name,exp_date,exp_amount,exp_type)
jsonString = json.dumps(expense.__dict__)
jsonFile = open("/var/www/html/data.json", "a+")
jsonFile.write(jsonString)
jsonFile.close()
print('Content-Type: text/plain')
print('')
print('successful')
print('<br>')
print(jsonString)
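# Note: appending one JSON object per request means data.json accumulates
# concatenated objects rather than a single JSON array; a consumer has to split
# the stream (or the format should be changed to one object per line).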
|
eliz-liu/money_site_html
|
form.py
|
form.py
|
py
| 795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cgitb.enable",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cgi.FieldStorage",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
}
] |
5200586519
|
"""
This module contains the main transmittance/reflectance calculation
bits.
Users can run the calculations through `model.Model()` and avoid
accessing `core` directly.
"""
import numpy as np
import scipy as sp
def rt_amp(index, delta, theta, pol):
"""Calculate the reflected and transmitted amplitudes through the
system.
Parameters
----------
index : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
delta : numpy array
An array of wavenumber offsets.
theta : numpy array
An array of angles in radians.
pol : string
The polarization of the source wave: 's' or 'p',
or 'u'.
Returns
-------
r, t : tuple
A tuple where 'r' is the reflected amplitude, and 't' is the
transmitted amplitude.
"""
t_amp, r_amp = make_rt_amp_matrix(index, theta, pol)
m_mat = make_m_matrix(index, t_amp, r_amp, delta)
m_prime = make_2x2(1., 0., 0., 1., dtype=complex)
for i in range(1, len(index)-1):
m_prime = np.dot(m_prime, m_mat[i])
C_m = make_2x2(1., r_amp[0, 1], r_amp[0, 1], 1., dtype=complex)
m_prime = np.dot(C_m / t_amp[0, 1], m_prime)
trans_amp = 1 / m_prime[0, 0]
ref_amp = m_prime[1, 0] / m_prime[0, 0]
return ref_amp, trans_amp
def make_rt_amp_matrix(index, theta, pol):
"""Construct reflection and transmission amplitude matrices.
Parameters
----------
index : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
theta : numpy array
An array of angles in radians.
pol : string
The polarization of the source wave: 's' or 'p'.
Returns
-------
t_mat, r_mat : tuple
The t- and r-amplitude matrices.
"""
t_mat = np.zeros((len(index), len(index)), dtype=complex)
r_mat = np.zeros((len(index), len(index)), dtype=complex)
for i in range(len(index) - 1):
t_mat[i, i+1] = t_interface(index[i], index[i+1], theta[i], theta[i+1], pol)
r_mat[i, i+1] = r_interface(index[i], index[i+1], theta[i], theta[i+1], pol)
return t_mat, r_mat
def make_m_matrix(index, t_matrix, r_matrix, delta):
"""Construct the characteristic matrix of the model.
Parameters
----------
index : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
t_matrix : numpy array
The t-amplitude matrix
r_matrix : numpy array
The r-amplitude matrix
delta : numpy array
An array of wavenumber offsets.
Returns
-------
m_mat : numpy array
The characteristic matrix of the model
"""
m_mat = np.zeros((len(index), 2, 2), dtype=complex)
for i in range(1, len(index)-1):
C_m = make_2x2(np.exp(-1j * delta[i]), 0., 0., np.exp(1j * delta[i]),
dtype=complex)
r_m = make_2x2(1., r_matrix[i, i+1], r_matrix[i, i+1], 1., dtype=complex)
m_mat[i] = (1 / t_matrix[i, i+1]) * np.dot(C_m, r_m)
return m_mat
def r_power(r_amp):
"""Return the fraction of reflected power.
Parameters
----------
r_amp : float
The net reflection amplitude after calculating the transfer
matrix.
Returns
-------
R : numpy array
The model reflectance
"""
return np.abs(r_amp)**2
def t_power(t_amp, index_i, index_f, theta_i, theta_f):
"""Return the fraction of transmitted power.
Parameters
----------
t_amp : float
The net transmission amplitude after calculating the transfer
matrix.
index_i : float
The index of refraction of the source material.
index_f : float
The index of refraction of the terminating material.
theta_i : float
The angle of incidence (radians) at the initial interface.
theta_f : float
The angle of incidence (radians) at the final interface.
Returns
-------
T : numpy array
The model transmittance
"""
return np.abs(t_amp**2) * \
( (index_f * np.cos(theta_f)) / (index_i * np.cos(theta_i) ) )
def r_interface(index1, index2, theta1, theta2, pol):
"""Calculate the reflected amplitude at an interface.
Parameters
----------
index1 : float
The index of refraction of the first material.
index2 : float
The index of refraction of the second material.
theta1 : float
The angle of incidence at interface 1, in radians
theta2 : float
The angle of incidence at interface 2, in radians
pol : string
The polarization of the source wave (either 's' or 'p').
Returns
-------
reflected amplitude : float
The amplitude of the reflected field at the interface
"""
if pol == 's':
numerator = (index1 * np.cos(theta1) - index2 * np.cos(theta2))
denominator = (index1 * np.cos(theta1) + index2 * np.cos(theta2))
elif pol == 'p':
numerator = (index2 * np.cos(theta1) - index1 * np.cos(theta2))
denominator = (index1 * np.cos(theta2) + index2 * np.cos(theta1))
else:
raise ValueError("Polarization must be 's' or 'p'")
return numerator / denominator
def t_interface(index1, index2, theta1, theta2, pol):
"""Calculate the transmission amplitude at an interface.
Parameters
----------
index1 : float
The index of refraction of the first material.
index2 : float
The index of refraction of the second material.
theta1 : float
The angle of incidence at interface 1, in radians
theta2 : float
The angle of incidence at interface 2, in radians
pol : string
The polarization of the source wave (either 's' or 'p').
Returns
-------
transmitted_amplitude : float
The amplitude of the transmitted field at the interface
"""
if pol == 's':
numerator = 2 * index1 * np.cos(theta1)
denominator = (index1 * np.cos(theta1) + index2 * np.cos(theta2))
elif pol == 'p':
numerator = 2 * index1 * np.cos(theta1)
denominator = (index1 * np.cos(theta2) + index2 * np.cos(theta1))
else:
raise ValueError("Polarization must be 's' or 'p'")
return numerator / denominator
def wavenumber(freq, index, tand):
"""Calculate the wavenumber in a material.
Parameters
----------
freq : float
The frequency at which to calculate the wavevector, k
tand : numpy array
An array of loss tangents, ordered from source to terminating
index : numpy array
An array of refractive indices, ordered from source to
terminating layer
Returns
-------
k : array
The complex wavenumber, k
"""
k = 2 * np.pi * (freq / 3e8) * index * np.sqrt(1 + 1j * tand)
return k
def alpha2imagn(freq, a, b, n):
"""Convert Halpern's 'a' and 'b' from an absorption coefficient
    of the form `a*freq**b` to a (frequency-dependent) imaginary refractive index.
Parameters
----------
freq : numpy array or float
The frequency (Hz) (or frequencies) at which to calculate the loss
tangent.
a : float
Halpern's 'a' coefficient
b : float
Halpern's 'b' coefficient
n : float
The real part of the material's refractive index
Returns
-------
imagn : numpy array or float
The imaginary component of the refractive index
"""
nu = freq / 30e9
# First we need the frequency-dependent absorption coefficient,
# alpha, which we get from the Halpern fit. From that we will
# calculate k(appa), the extinction coefficient, for each
# frequency of interest
alpha = 2 * a * nu**b
    # This is the absorption-extinction coefficient relation as written
# in Born & Wolf Principles of Optics 1st Ed., 1959, Ch. 13.1,
# Pg. 614, Eq. 21
# The factor of 3e10 (c in units of cms^-1) ensures that our k is
# unitless, as it ought to be.
imagn = (100 * 3e8 * alpha) / (4 * np.pi * n * freq)
return imagn
def alpha2tand(freq, a, b, n):
"""Convert Halpern's 'a' and 'b' from an absorption coefficient
of the form `a*freq**b` to a (frequency-dependent) loss tangent.
Parameters
----------
freq : numpy array or float
The frequency (Hz) (or frequencies) at which to calculate the loss
tangent.
a : float
Halpern's 'a' coefficient
b : float
Halpern's 'b' coefficient
n : float
The real part of the material's refractive index
Returns
-------
tand : numpy array
The loss tangent of the material at the given frequency and
Halpern coefficients.
"""
imagn = alpha2imagn(freq, a, b, n)
# The complex index of refraction of a material is related to the
# complex (relative) permittivity by the relation:
# e_r = e' + i*e'' = n^2 = (n + i*k)^2 = n^2 - k^2 + i*2nk
# By equating the real and imaginary parts we are left with:
# e' = (n^2 - k^2); e'' = 2nk
# With this information we can find the loss tangent, which is simply
# the ratio of the real and imaginary parts of the relative
# permittivity:
# tand = (e''/e')
ep = n**2 - imagn**2
epp = 2 * n * imagn
tand = epp / ep
return tand
def make_2x2(a11, a12, a21, a22, dtype=float):
"""Return a 2x2 array quickly.
Parameters
----------
a11 : float
Array element [0, 0].
a12 : float
Array element [0, 1].
a21 : float
Array element [1, 0].
a22 : float
Array element [1, 1].
dtype : dtype, optional
The datatype of the array. Defaults to float.
Returns
-------
array : numpy array
A 2x2 array [[a11, a12], [a21, a22]]
"""
array = np.empty((2, 2), dtype=dtype)
array[0, 0] = a11
array[0, 1] = a12
array[1, 0] = a21
array[1, 1] = a22
return array
def prop_wavenumber(k, d, theta):
"""Propagate the wave through a material and calculate its offset,
delta.
Parameters
----------
k : array
The wavenumber
d : array
An array of distances (thicknesses), ordered from source to
terminating layer
theta : float
The angle the wave passes through the medium
Returns
-------
delta : array
The phase difference
"""
# Turn off 'invalid multiplication' error; it's just the 'inf' boundaries
olderr = sp.seterr(invalid='ignore')
delta = k * d * np.cos(theta)
# Now turn the error back on
sp.seterr(**olderr)
return delta
def refract(n, theta0):
"""Calculate the angle by which an incident ray is refracted
Parameters
----------
n : numpy array
An array of refractive indices, ordered from source layer to
terminator layer.
theta0 : float
The initial angle of incidence (radians)
Returns
-------
thetas : numpy array
The Snell angles at each interface
"""
# Make a nice pairwise generator so we can avoid playing games with
# index counting
thetas = [theta0]
ngen = zip(n, n[1:])
for i, rind in enumerate(ngen):
theta = np.arcsin(np.real_if_close( rind[0] * np.sin(thetas[i]) / rind[1] ))
thetas.append(theta)
return np.asarray(thetas)
def replace_tand(freq, tand_array, halpern_dict):
"""Calculate a frequency-dependent loss tangent from a material's
Halpern coefficiencts if they exist.
Parameters
----------
freq : float
The frequency at which to calculate the loss tangent
tand_array : numpy array
The loss tangents of the materials, ordered from Source to
Terminator
halpern_dict : dict
A dictionary keyed by layer index, containing Halpern coefficients
Returns
-------
tand_array : numpy array
The loss tangents of the materials, ordered from Source to
Terminator. Where possible, the Halpern coefficients have been
applied to make the terms frequency-dependent.
"""
for k, v in halpern_dict.items():
tand_array[k] = alpha2tand(freq, v['a'], v['b'], v['n'])
return tand_array
def main(params):
"""Run a transmittance/reflectance calculation for the given parameters.
This function is the primary entry-point to the calculation, and should
not be called directly. Instead, call `Model.run()`.
If you must call `core.main()` directly, only do so after first calling
`Model.set_up()`.
Parameters
----------
params : dict
The dictionary contructed by `Model.set_up`. See that function
documentation for details.
Returns
-------
result : dict
A dictionary with three keys:
* `frequency`: the frequency (in Hz) at which T and R were calculated
* `transmittance`: the output transmittance (T) of the model
* `reflectance`: the output reflectance (R) of the model
"""
rind = params['rind']
thick = params['thick']
tand = params['tand']
pol = params['pol']
theta0 = params['theta0']
theta = refract(rind, theta0)
freq = params['freq']
halps = params['halpern_layers']
# Create containers for the reflection/transmission values we calculate
# at each frequency
ts = []
rs = []
for f in freq:
if len(halps.keys()) > 0:
tand = replace_tand(f, tand, halps)
ks = wavenumber(f, rind, tand)
delta = prop_wavenumber(ks, thick, theta)
r_amp, t_amp = rt_amp(rind, delta, theta, pol)
t_pow = t_power(t_amp, rind[0], rind[-1], theta[0], theta[-1])
r_pow = r_power(r_amp)
ts.append(t_pow)
rs.append(r_pow)
ts = np.asarray(ts)
rs = np.asarray(rs)
results = {'frequency':freq, 'transmittance':ts, 'reflectance':rs}
return results
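# A minimal, commented-out usage sketch (not part of the original module).
# The layer values below are invented for illustration; in practice the
# params dict comes from `Model.set_up()`:
#
#     params = {
#         'rind': np.array([1.0, 1.5, 1.0]),            # vacuum / slab / vacuum
#         'thick': np.array([np.inf, 2e-3, np.inf]),    # metres
#         'tand': np.array([0.0, 1e-3, 0.0]),
#         'pol': 's',
#         'theta0': 0.0,                                # radians
#         'freq': np.linspace(50e9, 300e9, 256),        # Hz
#         'halpern_layers': {},
#     }
#     result = main(params)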
|
anadolski/armmwave
|
armmwave/core.py
|
core.py
|
py
| 13,897 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.dot",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "scipy.seterr",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "scipy.seterr",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.arcsin",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.real_if_close",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 452,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 453,
"usage_type": "call"
}
] |
70994878268
|
# -*- coding: utf-8 -*-
import PySide2.QtWidgets as qtwidgets
import PySide2.QtCore as qtcore
import PySide2.QtGui as qtgui
import PySide2.QtNetwork as qtnetwork
import os.path
import signal
import socket
class HButtonBar(qtwidgets.QWidget):
layout=qtwidgets.QHBoxLayout
def __init__(self,def_list):
qtwidgets.QWidget.__init__(self)
b_layout=self.layout()
for label,callback in def_list:
button = qtwidgets.QPushButton(label)
button.clicked.connect(callback)
b_layout.addWidget(button)
self.setLayout(b_layout)
class VButtonBar(HButtonBar):
layout=qtwidgets.QVBoxLayout
class OpenFileWidget(qtwidgets.QWidget):
def __init__(self):
qtwidgets.QWidget.__init__(self)
self.field=qtwidgets.QLineEdit()
button=qtwidgets.QPushButton("Browse...")
layout = qtwidgets.QHBoxLayout()
layout.addWidget(self.field,stretch=1)
layout.addWidget(button,stretch=0)
self.setLayout(layout)
button.pressed.connect(self._open)
def text(self):
return self.field.text()
def setText(self,txt):
self.field.setText(txt)
def blockTextSignals(self,flag):
self.field.blockSignals(flag)
def _open(self):
dialog = qtwidgets.QFileDialog(self)
dialog.setFileMode(qtwidgets.QFileDialog.ExistingFile)
dialog.setAcceptMode(qtwidgets.QFileDialog.AcceptOpen)
old=self.field.text()
if not old:
dialog.setDirectory(".")
else:
dialog.setDirectory(os.path.dirname(old))
dialog.selectFile(old)
if dialog.exec_():
fnames = dialog.selectedFiles()
self.field.setText(fnames[0])
class SaveFileWidget(OpenFileWidget):
def _open(self):
dialog = qtwidgets.QFileDialog(self)
dialog.setFileMode(qtwidgets.QFileDialog.AnyFile)
dialog.setAcceptMode(qtwidgets.QFileDialog.AcceptSave)
old=self.field.text()
if not old:
dialog.setDirectory(".")
else:
dialog.setDirectory(os.path.dirname(old))
dialog.selectFile(old)
if dialog.exec_():
fnames = dialog.selectedFiles()
self.field.setText(fnames[0])
class OpenDirWidget(OpenFileWidget):
def _open(self):
dialog = qtwidgets.QFileDialog(self)
dialog.setFileMode(qtwidgets.QFileDialog.Directory)
dialog.setAcceptMode(qtwidgets.QFileDialog.AcceptOpen)
dialog.setOptions(qtwidgets.QFileDialog.ShowDirsOnly)
old=self.field.text()
if not old:
dialog.setDirectory(".")
else:
dialog.setDirectory(os.path.dirname(old))
dialog.selectFile(old)
if dialog.exec_():
fnames = dialog.selectedFiles()
self.field.setText(fnames[0])
class SignalWakeupHandler(qtnetwork.QAbstractSocket):
def __init__(self, parent=None):
super().__init__(qtnetwork.QAbstractSocket.UdpSocket, parent)
self.old_fd = None
# Create a socket pair
self.wsock, self.rsock = socket.socketpair(type=socket.SOCK_DGRAM)
# Let Qt listen on the one end
self.setSocketDescriptor(self.rsock.fileno())
# And let Python write on the other end
self.wsock.setblocking(False)
self.old_fd = signal.set_wakeup_fd(self.wsock.fileno())
# First Python code executed gets any exception from
# the signal handler, so add a dummy handler first
self.readyRead.connect(lambda : None)
# Second handler does the real handling
self.readyRead.connect(self._readSignal)
def __del__(self):
# Restore any old handler on deletion
if self.old_fd is not None and signal and signal.set_wakeup_fd:
signal.set_wakeup_fd(self.old_fd)
def _readSignal(self):
# Read the written byte.
        # Note: readyRead is blocked from occurring again until readData()
        # has been called, so call it even if you don't need the value.
data = self.readData(1)
# Emit a Qt signal for convenience
self.signalReceived.emit(data[0])
signalReceived = qtcore.Signal(int)
class FormDialog(qtwidgets.QDialog):
def _font(self,style,size):
font_db = qtgui.QFontDatabase()
family="Raleway"
font=font_db.font(family,style,size)
return font
def __init__(self,window,title,form,*args,**kwargs):
super().__init__(window,*args,**kwargs)
self.setWindowTitle(title)
flags = qtwidgets.QDialogButtonBox.Ok | qtwidgets.QDialogButtonBox.Cancel
button_box = qtwidgets.QDialogButtonBox(flags)
button_box.accepted.connect(self.accept)
button_box.rejected.connect(self.reject)
for w in button_box.findChildren(qtwidgets.QWidget):
w.setFont(self._font("Medium",10))
f_widget=qtwidgets.QWidget()
self._form=form
f_widget.setLayout(self._form)
for w in f_widget.findChildren(qtwidgets.QWidget):
w.setFont(self._font("Medium",10))
v_layout = qtwidgets.QVBoxLayout()
v_layout.addWidget(f_widget)
v_layout.addWidget(button_box)
self.setLayout(v_layout)
def get_data(self):
print("dialog")
ret=self.exec_()
data=list(self._form.get_data())
data.append(ret==self.Accepted)
return tuple(data)
class AwesomeToolBar(qtwidgets.QToolBar):
def _font(self,family,style,size):
font_db = qtgui.QFontDatabase()
family="Font Awesome 5 "+family
font=font_db.font(family,style,size)
return font
def __init__(self,parent): #icon,tooltip,size=8,style="Solid",family="Free"):
qtwidgets.QToolBar.__init__(self,parent)
def addAction(self,icon,tooltip,size=8,style="Solid",family="Free"):
action=qtwidgets.QToolBar.addAction(self,icon)
action.setToolTip(tooltip)
action.setFont(self._font(family,style,size))
return action
class AddRootProxyModel(qtcore.QIdentityProxyModel):
root="==root=="
def data(self, index, role):
parent=index.parent()
if parent.isValid():
return qtcore.QIdentityProxyModel.data(self,index,role)
row=index.row()
if row==0:
if role not in [ qtcore.Qt.DisplayRole, qtcore.Qt.EditRole]:
ret=qtcore.QIdentityProxyModel.data(self,index,role)
print(role,ret)
return ret
return "----"
sibling=index.sibling(row-1,index.column())
return qtcore.QIdentityProxyModel.data(self,sibling,role)
def flags(self,index):
if not index.parent().isValid():
if index.row()==0:
return qtcore.Qt.ItemIsEnabled | qtcore.Qt.ItemIsSelectable | qtcore.Qt.ItemNeverHasChildren
return qtcore.Qt.ItemIsEnabled | qtcore.Qt.ItemIsSelectable
def rowCount(self,index):
if index.isValid():
if index.parent().isValid():
return qtcore.QIdentityProxyModel.rowCount(self,index)
if index.row()==0: return 0
return qtcore.QIdentityProxyModel.rowCount(self,index)
return 1+qtcore.QIdentityProxyModel.rowCount(self)
def index(self,row,column,parent=qtcore.QModelIndex()):
if parent.isValid():
return qtcore.QIdentityProxyModel.index(self,row,column,parent)
if row==0:
ret=self.createIndex(0,column,self.root)
return ret
old=qtcore.QIdentityProxyModel.index(self,row-1,column,parent)
return self.createIndex(row,column,old.internalPointer())
def parent(self,index):
if not index.isValid(): return qtcore.QModelIndex()
obj=index.internalPointer()
if obj==self.root: return qtcore.QModelIndex()
return qtcore.QIdentityProxyModel.parent(self,index)
def mapToSource(self,proxyIndex):
new_index=qtcore.QIdentityProxyModel.mapToSource(self,proxyIndex)
if new_index.internalPointer()==self.root:
return qtcore.QModelIndex()
return new_index
def mapFromSource(self,sourceIndex):
new_index=qtcore.QIdentityProxyModel.mapFromSource(self,sourceIndex)
if new_index.parent().isValid(): return new_index
return self.createIndex(1+new_index.row(),new_index.column(),
new_index.internalPointer())
# def mapFromSource(self, sourceIndex):
# if not sourceIndex.isValid(): return qtcore.QModelIndex()
# parent=sourceIndex.parent()
# if parent.isValid():
# return self.createIndex(sourceIndex.row(),
# sourceIndex.column(),
# sourceIndex.internalPointer())
# return self.createIndex(1+sourceIndex.row(),
# sourceIndex.column(),
# sourceIndex.internalPointer())
# def mapToSource(self, proxyIndex):
# if not proxyIndex.isValid(): return qtcore.QModelIndex()
# parent=proxyIndex.parent()
# if parent.isValid:
# return qtcore.QIdentityProxyModel.mapToSource(self,proxyIndex)
# obj=proxyIndex.internalPointer()
# if obj==self.root: return qtcore.QModelIndex()
# return self.sourceModel().createIndex(proxyIndex.row()-1,
# proxyIndex.column(),
# obj)
|
chiara-paci/djvueditor
|
lib/python/djvuedlib/widgets.py
|
widgets.py
|
py
| 9,581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QHBoxLayout",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QWidget.__init__",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QPushButton",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QVBoxLayout",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QWidget.__init__",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QLineEdit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QPushButton",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QHBoxLayout",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QFileDialog",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "os.path.path.dirname",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "PySide2.QtNetwork.QAbstractSocket",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtNetwork",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "PySide2.QtNetwork.QAbstractSocket",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtNetwork",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "socket.socketpair",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "signal.set_wakeup_fd",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "signal.set_wakeup_fd",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "signal.set_wakeup_fd",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.Signal",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QDialog",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "PySide2.QtGui.QFontDatabase",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "PySide2.QtGui",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QDialogButtonBox",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QDialogButtonBox",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QVBoxLayout",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QToolBar",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "PySide2.QtGui.QFontDatabase",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "PySide2.QtGui",
"line_number": 168,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QToolBar.__init__",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QToolBar",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QToolBar.addAction",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QToolBar",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.data",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.data",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.data",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Qt",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.rowCount",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.rowCount",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.rowCount",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QModelIndex",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.index",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.index",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QModelIndex",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QModelIndex",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.parent",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.mapToSource",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QModelIndex",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel.mapFromSource",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "PySide2.QtCore.QIdentityProxyModel",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtCore",
"line_number": 235,
"usage_type": "name"
}
] |
36484764773
|
import cv2
import glob
from matplotlib import pyplot as plt
faceDet = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faceDet_two = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
faceDet_three = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
faceDet_four = cv2.CascadeClassifier("haarcascade_frontalface_alt_tree.xml")
fishface = cv2.face.FisherFaceRecognizer_create()
fishface.read('fish.xml')
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
for files in glob.glob("C:\\Users\\HP\\Desktop\\classify\\*"):
gray = cv2.imread(files)
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
face = faceDet.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
face_two = faceDet_two.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
face_three = faceDet_three.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
face_four = faceDet_four.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=10, minSize=(5, 5), flags=cv2.CASCADE_SCALE_IMAGE)
if len(face) == 1:
facefeatures = face
elif len(face_two) == 1:
facefeatures = face_two
elif len(face_three) == 1:
facefeatures = face_three
elif len(face_four) == 1:
facefeatures = face_four
else:
        facefeatures = []  # no unambiguous face found
for (x, y, w, h) in facefeatures:
gray = gray[y:y+h, x:x+w]
try:
gray = cv2.resize(gray, (350, 350))
except:
pass
plt.subplot(132)
plt.title('img')
plt.imshow(gray, 'gray')
plt.xticks([])
plt.yticks([])
plt.show()
Class, abc = fishface.predict(gray)
print(emotions[Class])
|
dishavarshney9/uhack
|
classi.py
|
classi.py
|
py
| 1,880 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.CascadeClassifier",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.face.FisherFaceRecognizer_create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.face",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.CASCADE_SCALE_IMAGE",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
}
] |
20841031996
|
from sklearn.model_selection import train_test_split
from sklearn import svm
def svm_classification(X, y, C_in, gamma_in, kernel_in):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)  # stratified split keeps class balance in train/test
classifierSVM = svm.SVC(C=C_in, degree=2, gamma=gamma_in, kernel=kernel_in)
# training
classifierSVM.fit(X_train, y_train)
# prediksi data test
y_pred_SVM = classifierSVM.predict(X_test)
# return X_train, X_test, y_train, y_test, classifierSVM, y_pred_SVM
return classifierSVM, y_pred_SVM, y_test
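# Illustrative usage with assumed toy data (not part of the original module):
#
#     from sklearn.datasets import make_classification
#     X, y = make_classification(n_samples=100, n_features=5, random_state=42)
#     clf, y_pred, y_test = svm_classification(X, y, C_in=1.0, gamma_in='scale', kernel_in='rbf')
#     print((y_pred == y_test).mean())  # accuracy on the held-out 20%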
|
mfaisalafandi/identification_teks_ulasan_svm
|
Klasifikasi.py
|
Klasifikasi.py
|
py
| 566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 7,
"usage_type": "name"
}
] |
71474372027
|
# Rotating or flipping an image
from PIL import Image
def main():
image = Image.open('../lenna.png')
image.show('Original')
# Rotate 60 degrees counter clockwise
rotated_image = image.rotate(60)
rotated_image.show('Rotate 60')
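    # Note: rotate() keeps the original canvas size by default, so the corners
    # of the rotated image are clipped; image.rotate(60, expand=True) would
    # grow the canvas to fit the whole rotated image.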
# Rotate using Image.transpose
# Transpose supports these values:
# - Image.FLIP_LEFT_RIGHT
# - Image.FLIP_TOP_BOTTOM
# - Image.ROTATE_90
# - Image.ROTATE_180
# - Image.ROTATE_270
rotated_image = image.transpose(Image.ROTATE_90)
rotated_image.show('Rotate 90')
# Flip horizontal
flipped_image = image.transpose(Image.FLIP_LEFT_RIGHT)
flipped_image.show('Flip horizontal')
# Flip vertical
flipped_image = image.transpose(Image.FLIP_TOP_BOTTOM)
flipped_image.show('Flip vertical')
if __name__ == '__main__':
main()
|
gkostadinov/py-pil-imageprocessing
|
1-transformations/2.rotate.py
|
2.rotate.py
|
py
| 837 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.open",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.ROTATE_90",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PIL.Image.FLIP_LEFT_RIGHT",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PIL.Image.FLIP_TOP_BOTTOM",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
}
] |
43536088674
|
import re
import math
import scipy.stats as stats
from statsmodels.stats.multitest import multipletests
import numpy as np
import pandas as pd
from tqdm import tqdm
# import functools
import pprint
from mutagene.dna import (
nucleotides, complementary_nucleotide,
bases_dict,
# comp_dict,
extended_nucleotides, complementary_extended_nucleotide)
from mutagene.io.motifs import get_known_motifs
import logging
logger = logging.getLogger(__name__)
def identify_motifs(samples_mutations, custom_motif=None, strand=None, threshold=None, dump_matches=None, stat_type=None):
"""
:param samples_mutations: list of mutations from input file
:param custom_motif: specified motif to search for
    :param strand: strand(s) to search on (T: transcribed, N: non-transcribed, A: any, or a combination thereof: 'TNA')
    :param threshold: q-value significance threshold (defaults to 0.05)
    :param dump_matches: pass through to process_mutations, stores all motif matches
:param stat_type: pass through to process_mutations, choose statistical test
:return: command-line output
"""
motif_matches = []
sig_motif_matches = []
pvals = []
if strand is None:
strand = 'A'
else:
strand = set(strand) # in case TNA codes repeat
if threshold is None:
threshold = 0.05
if custom_motif:
search_motifs = scanf_motif(custom_motif)
else:
motifs = get_known_motifs()
search_motifs = motifs.copy()
# search_motifs.extend(scanf_motif(custom_motif))
_strand_map = {
'T': 'transcribed',
'N': 'non-transcribed',
'A': 'any strand'
}
disable_progress_bar = logger.getEffectiveLevel() == logging.DEBUG
for sample, mutations in tqdm(samples_mutations.items(), leave=False, disable=disable_progress_bar):
if mutations is not None and len(mutations) > 0:
first_mut_seq_with_coords = mutations[0][-1]
window_size = (len(first_mut_seq_with_coords) - 1) // 2
for m in tqdm(search_motifs, leave=False, disable=disable_progress_bar):
for s in strand:
result, saved_data = process_mutations(
mutations,
m['motif'],
m['position'],
m['ref'],
m['alt'],
window_size,
s,
stat_type=stat_type)
if dump_matches:
for chrom, pos in saved_data['mutation_motif']:
dump_matches.write(
"chr{}\t{}\t{}\t{}\t{}\t{}\n".format(
chrom, pos, int(pos) + 1, sample, m['logo'], _strand_map[s]))
debug_data = {
'sample': sample,
'motif': m['logo'],
'strand': s}
debug_data.update(result)
debug_string = pprint.pformat(debug_data, indent=4)
logger.debug(debug_string)
motif_matches.append({
'sample': sample,
'mutagen': m['name'],
'motif': m['logo'],
'strand': _strand_map[s],
'enrichment': result['enrichment'],
'mut_min': result['mutation_load'],
'mut_max': result['bases_mutated_in_motif'],
'odds_ratio': result['odds_ratio'],
'pvalue': result['pvalue']
})
pvals.append(result['pvalue'])
qvalues = get_corrected_pvalues(pvals)
for i, motif_dict in enumerate(motif_matches):
motif_matches[i]['qvalue'] = qvalues[i]
if motif_dict['mut_min'] == 0:
continue
if motif_dict['qvalue'] >= threshold:
continue
sig_motif_matches.append(motif_dict)
return sig_motif_matches
def scanf_motif(custom_motif):
""" recognize motif syntax like A[C>T]G and create a motif entry """
m = re.search(
r'([' + extended_nucleotides + ']*)\\[([' + nucleotides + '])>([' + extended_nucleotides + '])\\]([' + extended_nucleotides + ']*)',
custom_motif.upper())
if m:
g = m.groups('')
# print("GROUPS", m.group(1), m.group(2), m.group(3), m.group(4))
entry = {}
entry['logo'] = m.group(0)
entry['motif'] = g[0] + g[1] + g[3]
entry['position'] = len(g[0])
entry['ref'] = g[1]
entry['alt'] = g[2]
if entry['ref'] == entry['alt']:
return []
entry['name'] = 'Custom motif'
entry['references'] = ''
return [entry, ]
return []
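# Illustrative check: scanf_motif("A[C>T]G") returns a single entry with
# logo 'A[C>T]G', motif 'ACG', position 1, ref 'C', alt 'T'.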
def calculate_RR(ct):
"""
Mutation is treatment
No mutation is placebo
:param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:return: enrichment or risk ratio
"""
try:
RR = ((ct.loc['mutation', 'motif'] / (ct.loc['mutation', 'motif'] + ct.loc['mutation', 'no motif'])) /
(ct.loc['no mutation', 'motif'] / (ct.loc['no mutation', 'motif'] + ct.loc['no mutation', 'no motif'])))
except ZeroDivisionError:
RR = 0.0
return RR
def calculate_RR_for_motif(ct):
"""
Motif is treatment
No motif is placebo
:param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:return: enrichment or risk ratio
"""
try:
RR = ((ct.loc['mutation', 'motif'] / (ct.loc['mutation', 'motif'] + ct.loc['no mutation', 'motif'])) /
(ct.loc['mutation', 'no motif'] / (ct.loc['mutation', 'no motif'] + ct.loc['no mutation', 'no motif'])))
except ZeroDivisionError:
RR = 0.0
return RR
def calculate_OR(ct):
"""
:param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:return: odds ratio
"""
try:
OR = (
(ct.loc['mutation', 'motif'] / ct.loc['mutation', 'no motif']) /
(ct.loc['no mutation', 'motif'] / ct.loc['no mutation', 'no motif']))
except ZeroDivisionError:
OR = 0.0
return OR
def Haldane_correction(ct, pseudocount=0.5):
    """
    Apply the Haldane correction (+ 0.5 by default) if any value in the
    contingency table is zero.
    :param ct: mutually exclusive counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
    :return: contingency table after the Haldane correction is applied
    """
    return ct + pseudocount if np.any(np.isclose(ct.to_numpy(), 0.0)) else ct
def calculate_mutation_load(N_mutations, enrichment):
"""
Mutation load (minimum estimate) calculation following Gordenin et al protocol
However, at this point motif matches are not filtered for p-value significance
That's done in the end after multiple testing correction
"""
mutation_load = 0.0
if enrichment > 1.0:
mutation_load = N_mutations * (enrichment - 1) / enrichment
# elif p_value < p_value_threshold: tests for enrichment depletion
return mutation_load
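# Illustrative check: with enrichment 2.0 and N_mutations = 10, the minimum
# mutation-load estimate is 10 * (2.0 - 1) / 2.0 = 5.0.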
def get_stats(ct, stat_type='fisher'):
"""
Calculate Fisher and Chi2 test pvalues,
:param ct: counts of mutated matching motifs, matching mutations, matching motifs, and matching bases
:param stat_type: Type of pvalue (Fisher's ('fisher') or Chi-Square ('chi2'))
:return: pvalue of the corresponding statistical test
"""
p_val = 1.0
if stat_type is None:
stat_type = 'fisher'
stat_type = stat_type.lower()
acceptable_tests = ('fisher', 'chi2')
if stat_type not in acceptable_tests:
logger.warning('get_stats() can only calculate p-values for ' + str(acceptable_tests))
if stat_type == 'fisher':
try:
p_val = stats.fisher_exact(ct, alternative="greater")[1]
# if p_val > 0.05:
# p_val = stats.fisher_exact(ct, alternative="less")[1] #calculates if motif is underrepresented
except ValueError:
p_val = 1.0
elif stat_type == 'chi2':
try:
p_val = stats.chi2_contingency(ct)[1]
except ValueError:
p_val = 1.0
return p_val
def get_corrected_pvalues(p_values):
qvalues = []
if len(p_values):
qvalues = multipletests(pvals=p_values, method='fdr_bh')[1]
return qvalues
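# Illustrative check: Benjamini-Hochberg adjustment preserves input order,
# e.g. get_corrected_pvalues([0.01, 0.04, 0.03]) -> approximately
# array([0.03, 0.04, 0.04]).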
# @functools.lru_cache(maxsize=None)
def get_rev_comp_seq(sequence):
"""
:param sequence: forward DNA sequence
:return: reverse complimentary DNA sequence
"""
# rev_comp_seq = "".join([complementary_nucleotide[i] for i in reversed(sequence)])
cn = complementary_nucleotide
return [(i[0], i[1], cn[i[2]], '-') for i in reversed(sequence)]
def mutated_base(mutation, ref, alt):
"""
:param mutation: [(record.CHROM, record.POS, record.REF, record.ALT)]
:param ref: list the nucleotide base pre-mutation
:param alt: list the nucleotide base post-mutation
:return: True if mutation matches the specified ref and alt
"""
# makes sure single base substitution
_, _, mut_ref, mut_alt = mutation
if mut_alt and mut_ref and len(mut_ref) == 1 and len(mut_alt) == 1 and mut_ref != mut_alt:
# mutation matches the substitution
if mutation[2] in bases_dict[ref] and mutation[3] in bases_dict[alt]:
return True
def find_matching_motifs(seq, motif, motif_position):
"""
:param seq: DNA sequence
:param motif: specified motif
:param motif_position: position of mutated base in motif, 0-base numbering
:return: generator of matching positions
TODO: SLOW algorithm O(n * m). Need to create a suffix tree with regexp
"""
# print("Looking for motif {} in {}, {}".format(motif, sequence, len(sequence) - len(motif)))
for i in range(len(seq) - len(motif) + 1):
# s = seq[i: i + len(motif)]
# print(s)
for j, c in enumerate(motif):
if seq[i + j][2] not in bases_dict[c]:
break
else:
yield seq[i + motif_position]
def find_matching_bases(seq, ref, motif, motif_position):
"""
:param seq:
:param ref:
:param motif:
:param motif_position:
:return: bases that match mutations
"""
for i in range(motif_position, len(seq) - (len(motif) - motif_position) + 1):
# range excludes border of sequence that may be motifs that don't fit window size
if seq[i][2] in bases_dict[ref]:
yield seq[i]
def make_contingency_table(
array=None,
motif_mutation=None,
no_motif_mutation=None,
motif_no_mutation=None,
no_motif_no_mutation=None):
""" Make a 2x2 contingency table out of a numpy array or four integers"""
if array is not None:
assert isinstance(array, np.ndarray)
assert array.shape == (2, 2)
else:
array = np.array([
[motif_mutation, no_motif_mutation],
[motif_no_mutation, no_motif_no_mutation]
])
contingency_table = pd.DataFrame(array)
contingency_table.columns = ["motif", "no motif"]
contingency_table.index = ["mutation", "no mutation"]
return contingency_table
def process_mutations(mutations, motif, motif_position, ref, alt, range_size, strand, stat_type=None):
"""
:param mutations: mutations to be analyzed
:param motif: specified motif to search for
:param motif_position: location of mutation in motif, 0-base numbering from left of motif
:param ref: base pre-mutation
:param alt: base post-mutation
:param range_size: how far in the motif to search for
:param strand: strand motif should be searched on
:param stat_type: type of pvalue: Fisher's (default) or Chi-Square
    :return: (results summary dictionary, saved_matches dictionary of motif-matching mutation positions)
"""
assert range_size >= 0
assert len(ref) == 1
assert len(alt) == 1
assert 0 <= motif_position < len(motif)
assert len(set(strand) - set("ATN")) == 0, "[process_mutations] only A, T, N allowed in strand parameter"
matching_bases = set()
matching_motifs = set()
matching_mutated_motifs = set()
matching_mutated_bases = set()
# extra loop for sample in sample list
for chrom, pos, transcript_strand, x, y, seq in mutations:
# extract the longest sequence we would ever need (motif + range_size); range size = # bases outside mutation
mutation = chrom, pos, x, y
rev_seq = get_rev_comp_seq(seq)
# assuming that all mutations are reported in '+' reference strand
if strand == 'A' or (strand == 'T' and transcript_strand == '+') or (strand == 'N' and transcript_strand == '-'):
# not mutated:
for ref_match in find_matching_bases(seq, ref, motif, motif_position):
matching_bases.add(ref_match[0:2])
for motif_match in find_matching_motifs(seq, motif, motif_position):
matching_motifs.add(motif_match[0:2])
# mutated:
if mutated_base(mutation, ref, alt):
# m = (mutation[0], mutation[1], mutation[2], "+")
matching_mutated_bases.add(mutation[0:2])
context_of_mutation = seq[range_size - motif_position: range_size - motif_position + len(motif)]
for motif_match in find_matching_motifs(context_of_mutation, motif, motif_position):
matching_mutated_motifs.add(motif_match[0:2])
if strand == 'A' or (strand == 'T' and transcript_strand == '-') or (strand == 'N' and transcript_strand == '+'):
# rev compl: not mutated:
for ref_match in find_matching_bases(rev_seq, ref, motif, motif_position):
matching_bases.add(ref_match[0:2])
for motif_match in find_matching_motifs(rev_seq, motif, motif_position):
matching_motifs.add(motif_match[0:2])
# rev compl: mutated:
if mutated_base(mutation, complementary_extended_nucleotide[ref], complementary_extended_nucleotide[alt]):
# m = (mutation[0], mutation[1], mutation[2], "-")
matching_mutated_bases.add(mutation[0:2])
# rev comp:
context_of_mutation = rev_seq[range_size - motif_position: range_size - motif_position + len(motif)]
for motif_match in find_matching_motifs(context_of_mutation, motif, motif_position):
matching_mutated_motifs.add(motif_match[0:2])
motif_mutation_count = len(matching_mutated_motifs) # bases mutated in motif
stat_mutation_count = len(matching_mutated_bases - matching_mutated_motifs) # bases mutated not in motif
stat_motif_count = len(matching_motifs - matching_mutated_motifs) # bases not mutated in motif
stat_ref_count = len(matching_bases - (matching_motifs | matching_mutated_bases)) # bases not mutated not in motif
# number of A[T>G]T occurrences motif_mutation_count
# / number of [T>G] occurrences stat_mutation_count + motif_mutation_count
# ----------
# number of ATT occurrences in DNA context stat_motif_count
# / number of T occurrences in DNA context stat_ref_count + stat_motif_count
contingency_table = make_contingency_table(
motif_mutation=motif_mutation_count,
no_motif_mutation=stat_mutation_count,
motif_no_mutation=stat_motif_count,
no_motif_no_mutation=stat_ref_count)
# data={
# "'{}>{}' mutation".format(ref, alt): [stat_mutation_count, motif_mutation_count],
# "no '{}>{}' mutation".format(ref, alt): [stat_ref_count, stat_motif_count]},
# index=("no '{}' motif".format(motif), "'{}' motif".format(motif)))
logger.debug("\n" + contingency_table.to_string() + "\n")
logger.debug("({} / ({} + {}) ) / ({} / ({} + {}))".format(
contingency_table.loc['mutation', 'motif'],
contingency_table.loc['mutation', 'motif'],
contingency_table.loc['mutation', 'no motif'],
contingency_table.loc['no mutation', 'motif'],
contingency_table.loc['no mutation', 'motif'],
contingency_table.loc['no mutation', 'no motif']))
contingency_table = Haldane_correction(contingency_table)
enrichment = risk_ratio = calculate_RR(contingency_table) # enrichment = risk ratio
odds_ratio = calculate_OR(contingency_table)
p_val = get_stats(contingency_table, stat_type)
mut_load = calculate_mutation_load(motif_mutation_count, enrichment)
result = {
'enrichment': enrichment, # AKA risk ratio
'odds_ratio': odds_ratio,
'mutation_load': math.ceil(mut_load),
'pvalue': p_val,
'bases_mutated_in_motif': motif_mutation_count,
'bases_mutated_not_in_motif': stat_mutation_count,
'bases_not_mutated_in_motif': stat_motif_count,
'bases_not_mutated_not_in_motif': stat_ref_count,
'total_mutations': len(mutations)
}
saved_matches = {
'mutation_motif': matching_mutated_motifs
}
return result, saved_matches
|
neksa/mutagene
|
mutagene/motifs/__init__.py
|
__init__.py
|
py
| 17,419 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "mutagene.io.motifs.get_known_motifs",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "mutagene.dna.extended_nucleotides",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "mutagene.dna.nucleotides",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "numpy.any",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "scipy.stats.fisher_exact",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "scipy.stats.chi2_contingency",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "statsmodels.stats.multitest.multipletests",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "mutagene.dna.complementary_nucleotide",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "mutagene.dna.bases_dict",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "mutagene.dna.bases_dict",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "mutagene.dna.bases_dict",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 316,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "mutagene.dna.complementary_extended_nucleotide",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 437,
"usage_type": "call"
}
] |
8502338250
|
import torch
import torch.nn as nn
from Descriptor import Descriptor
from Recovery_Submodule import R_t, Pyramid_maxout
class TR(nn.Module):
# translucency recovery(TR) module
def __init__(self, input_channel=3, beta=4, gamma=4):
super(TR, self).__init__()
self.D_t = Descriptor(input_channel, gamma)
self.R_t = R_t(385, beta)
def forward(self, x, **kwargs):
f_t = self.D_t(x)
y_, f_c, z_hat, a = self.R_t(x, f_t, **kwargs)
return y_, f_c, z_hat, a
class TR_new(nn.Module):
# A new translucency recovery(TR) module with two descriptors
def __init__(self, input_channel=3, beta=4, gamma=4):
super(TR_new, self).__init__()
self.D_t_1 = Descriptor(input_channel, gamma)
self.D_t_2 = Descriptor(input_channel, gamma)
self.SE = Pyramid_maxout(385, 1, beta)
self.AE = Pyramid_maxout(385, 3, beta)
def forward(self, x, **kwargs):
f_t_1 = self.D_t_1(x)
z_hat = self.SE(f_t_1)
z_hat[z_hat >= 1] = 1
z_hat[z_hat <= 0] = 0
z_hat_ = z_hat.detach()
f_t_2 = self.D_t_2(x)
a = self.AE(f_t_2)
# yield estimated snow-free image y'
y_ = (z_hat_ < 1) * (x - a * z_hat_) / (1 - z_hat_ + 1e-8) + (z_hat_ == 1) * x
y_[y_ >= 1] = 1
y_[y_ <= 0] = 0
# yield feature map f_c
f_c = torch.cat([y_, z_hat_, a], dim=1)
return y_, f_c, z_hat, a
class TR_za(nn.Module):
# A translucency recovery(TR) module predict z\times a
def __init__(self, input_channel=3, beta=4, gamma=4):
super(TR_za, self).__init__()
self.D_t = Descriptor(input_channel, gamma)
self.SE = Pyramid_maxout(385, 1, beta)
self.SAE = Pyramid_maxout(385, 3, beta)
def forward(self, x, **kwargs):
f_t = self.D_t(x)
z_hat = self.SE(f_t)
za = self.SAE(f_t)
z_hat[z_hat >= 1] = 1
z_hat[z_hat <= 0] = 0
za[za >= 1] = 1
za[za <= 0] = 0
# yield estimated snow-free image y'
y_ = (z_hat < 1) * (x - za) / (1 - z_hat + 1e-8) + (z_hat == 1) * x
y_[y_ >= 1] = 1
y_[y_ <= 0] = 0
# yield feature map f_c
f_c = torch.cat([y_, z_hat, za], dim=1)
return y_, f_c, z_hat, za
class RG(nn.Module):
# the residual generation (RG) module
def __init__(self, input_channel=7, beta=4, gamma=4):
super(RG, self).__init__()
self.D_r = Descriptor(input_channel, gamma)
block = []
for i in range(beta):
block.append(nn.Conv2d(385, 3, 2 * i + 1, 1, padding=i))
self.conv_module = nn.ModuleList(block)
self.activation = nn.Tanh()
def forward(self, f_c):
f_r = self.D_r(f_c)
for i, module in enumerate(self.conv_module):
if i == 0:
r = module(f_r)
else:
                r = r + module(f_r)  # accumulate the multi-scale conv outputs
r = self.activation(r)
return r
class DesnowNet(nn.Module):
# the DesnowNet
def __init__(self, input_channel=3, beta=4, gamma=4, mode='original'):
super(DesnowNet, self).__init__()
if mode == 'original':
self.TR = TR(input_channel, beta, gamma)
elif mode == 'new_descriptor':
self.TR = TR_new(input_channel, beta, gamma)
elif mode == 'za':
self.TR = TR_za(input_channel, beta, gamma)
else:
raise ValueError("Invalid architectural mode")
self.RG = RG(beta=beta, gamma=gamma)
def forward(self, x, **kwargs):
y_, f_c, z_hat, a = self.TR(x, **kwargs)
r = self.RG(f_c)
y_hat = r + y_
return y_hat, y_, z_hat, a
if __name__ == '__main__':
device = 'cuda'
net = DesnowNet().to(device)
mask = torch.zeros([2, 1, 64, 64]).to(device)
img = torch.zeros([2, 3, 64, 64]).to(device)
y_hat, y_, z_hat, a = net(img, mask=mask)
y_hat.mean().backward()
print("finished")
|
linYDTHU/DesnowNet_Context-Aware_Deep_Network_for_Snow_Removal
|
network/DesnowNet.py
|
DesnowNet.py
|
py
| 3,956 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "Descriptor.Descriptor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Recovery_Submodule.R_t",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "Descriptor.Descriptor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Descriptor.Descriptor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Recovery_Submodule.Pyramid_maxout",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "Recovery_Submodule.Pyramid_maxout",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "Descriptor.Descriptor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "Recovery_Submodule.Pyramid_maxout",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "Recovery_Submodule.Pyramid_maxout",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "Descriptor.Descriptor",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 116,
"usage_type": "call"
}
] |
35226084140
|
from __future__ import division
# Our Backend for the App!
# Built with Flask
# Import Flask
import flask
import requests
import os
from flask import send_file
import re
import sys
# Create the application
app = flask.Flask(__name__)
# serving home.html
@app.route('/', methods=['GET'])
def serve_page():
return flask.render_template('home.html')
# process query
@app.route('/process_query', methods=['POST'])
def process_query():
data = flask.request.form # is a dictionary
input = data['user_input']
input_in_list = input.split(' ')
return flask.render_template('home.html', same=processInput(input_in_list), og=input)
def processInput(input_in_list):
    # Map each recognized keyword to its sign-language image; keywords are
    # checked in this order, so later matches overwrite earlier ones, exactly
    # as in the original chain of ifs.
    keyword_images = {
        "bye": "static/bye.jpg",
        "hello": "static/hello.png",
        "yes": "static/yes.png",
        "no": "static/no.png",
        "please": "static/please.png",
        "thanks": "static/thanks.png",
        "who": "static/who.png",
        "what": "static/what.png",
        "when": "static/when.png",
        "where": "static/where.png",
        "why": "static/why.png",
        "which": "static/which.png",
        "how": "static/how.png",
    }
    for s, i in enumerate(input_in_list):
        for keyword, image in keyword_images.items():
            if keyword in i.lower():
                input_in_list[s] = image
    return input_in_list
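# Illustrative check: processInput(['Hello', 'world']) returns
# ['static/hello.png', 'world']; only recognized keywords are replaced by
# their image paths.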
def listen_print_loop(responses):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = ' ' * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + '\r')
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
return flask.render_template('home.html', same=processInput("".join(transcript).split(" ")), og="".join(transcript))
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if re.search(r'\b(exit|quit)\b', transcript, re.I):
print('Exiting..')
break
num_chars_printed = 0
@app.route('/speech', methods=['GET'])
def main():
# See http://g.co/cloud/speech/docs/languages
# for a list of supported languages.
language_code = 'en-US' # a BCP-47 language tag
if __name__ == '__main__':
app.run(debug=True)
|
manichandra95151/TTSL
|
main.py
|
main.py
|
py
| 4,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 108,
"usage_type": "attribute"
}
] |
24312665302
|
from langdetect import detect
def to_sentences(text):
text = text.replace("\n", " ")
sentences = [s + '.' for s in text.split(".") if s != ""]
return sentences
def divide(text, input_size=5000):
"""
Divide text into chunks of input_size
Args:
text (str): Text to be divided
input_size (int): Size of each chunk
"""
    # shorten input_size for Asian languages (denser scripts)
lang = detect(text)
if lang in ['ko', 'ja', 'zh-cn', 'zh-tw', 'zh-hk']:
input_size = min(input_size, 1300)
# divide text by words
text = to_sentences(text)
result = []
temp = ""
for word in text:
if len(temp + word) >= input_size:
result.append(temp)
temp = ""
temp += word
result.append(temp)
return result
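# Illustrative usage (hypothetical input): with input_size=30 each chunk holds
# as many whole sentences as fit, e.g.
#   divide("First sentence. Second sentence. Third sentence.", input_size=30)
#   -> ['First sentence.', ' Second sentence.', ' Third sentence.']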
|
hyunooss/SSUmmary
|
django-server/ssummary_site/modules/utils.py
|
utils.py
|
py
| 788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "langdetect.detect",
"line_number": 18,
"usage_type": "call"
}
] |
22807898242
|
# coding: utf8
"""Lock: used to prevent processes from fighting over shared memory."""
import multiprocessing
import time
def job(val, num, lo):
    lo.acquire()  # acquire the lock
for _ in range(10):
time.sleep(0.1)
val.value += num
print(val.value)
    lo.release()  # release the lock
def multicore():
    lo = multiprocessing.Lock()  # create the lock object
    share_memory = multiprocessing.Value("i", 0)  # a shared int value initialized to 0
res1 = multiprocessing.Process(target=job, args=(share_memory, 1, lo))
res2 = multiprocessing.Process(target=job, args=(share_memory, 9, lo))
res1.start()
res2.start()
res1.join()
res2.join()
if __name__ == "__main__":
multicore()
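# A minimal counter-example sketch (an illustrative addition; `job_unlocked`
# is a hypothetical helper, not part of the original file): the same job
# without the lock, whose read-modify-write on the shared value can
# interleave between the two processes.
def job_unlocked(val, num):
    for _ in range(10):
        time.sleep(0.1)
        val.value += num  # not atomic without the lock
        print(val.value)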
|
sola1121/practice_code
|
python3/对于异步的例子/multiprocessing/6 multiprocessing lock锁.py
|
6 multiprocessing lock锁.py
|
py
| 752 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Lock",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 22,
"usage_type": "call"
}
] |
29074159051
|
from RepSys import Error, config
from RepSys.util import execcmd
from RepSys.VCS import *
from os.path import basename, dirname
from os import chdir, getcwd
import sys
import re
import time
from xml.etree import cElementTree as ElementTree
import subprocess
class GITLogEntry(VCSLogEntry):
def __init__(self, revision, author, date):
        VCSLogEntry.__init__(self, revision, author, date)
class GIT(VCS):
def __init__(self):
VCS.__init__(self)
self.vcs_name = "git"
self.vcs_command = config.get("global", "git-command", "git")
self.vcs_supports['clone'] = True
self.env_defaults = {"GIT_SSH": self.vcs_wrapper}
def clone(self, url, targetpath, **kwargs):
if url.split(':')[0].find("svn") < 0:
return VCS.clone(self, url, targetpath, **kwargs)
else:
            # To speed things up on huge repositories, we'll just grab all the
            # revision numbers for this specific directory and fetch only
            # those, instead of having to walk each and every revision...
retval, result = execcmd("svn log --stop-on-copy --xml %s" % url)
if retval:
return retval
parser = ElementTree.XMLTreeBuilder()
result = "".join(result.split("\n"))
parser.feed(result)
log = parser.close()
logentries = log.getiterator("logentry")
revisions = []
topurl = dirname(url)
trunk = basename(url)
tags = "releases"
execcmd("git svn init %s --trunk=%s --tags=%s %s" % (topurl, trunk, tags, targetpath), show=True)
chdir(targetpath)
for entry in logentries:
revisions.append(entry.attrib["revision"])
while revisions:
execcmd("git svn fetch -r%d" % int(revisions.pop()), show=True)
cmd = ["svn", "rebase"]
return self._execVcs_success(*cmd, **kwargs)
class SVNLook(VCSLook):
def __init__(self, repospath, txn=None, rev=None):
VCSLook.__init__(self, repospath, txn, rev)
# vim:et:ts=4:sw=4
|
mdkcauldron/proyvinds-repsys
|
RepSys/git.py
|
git.py
|
py
| 2,133 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "RepSys.config.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "RepSys.config",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "RepSys.util.execcmd",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree.XMLTreeBuilder",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "xml.etree.cElementTree",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "RepSys.util.execcmd",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "RepSys.util.execcmd",
"line_number": 48,
"usage_type": "call"
}
] |
7422093495
|
from operator import add
from itertools import chain, combinations
from functools import reduce
import math
import numpy as np
from scipy import ndimage
from tkinter import *
class GF2(object):
def __init__(self, a=0):
self.value = int(a) & 1
def __add__(self, rhs):
return GF2(self.value + GF2(rhs).value)
def __mul__(self, rhs):
return GF2(self.value * GF2(rhs).value)
def __sub__(self, rhs):
return GF2(self.value - GF2(rhs).value)
def __truediv__(self, rhs):
return GF2(self.value / GF2(rhs).value)
def __repr__(self):
return str(self.value)
def __eq__(self, rhs):
if isinstance(rhs, GF2):
return self.value == rhs.value
return self.value == rhs
def __le__(self, rhs):
if isinstance(rhs, GF2):
return self.value <= rhs.value
return self.value <= rhs
def __lt__(self, rhs):
if isinstance(rhs, GF2):
return self.value < rhs.value
return self.value < rhs
def __int__(self):
return self.value
def __long__(self):
return self.value
GF2array = np.vectorize(GF2)
def gjel(A):
nulldim = 0
for i, row1 in enumerate(A):
pivot = A[i:, i].argmax() + i
if A[pivot, i] == 0:
nulldim = len(A) - i
break
new_row = A[pivot] / A[pivot, i]
A[pivot] = A[i]
row1[:] = new_row
for j, row2 in enumerate(A):
if j == i:
continue
row2[:] -= new_row*A[j, i]
return A, nulldim
def GF2inv(A):
n = len(A)
assert n == A.shape[1], "Matrix must be square"
A = np.hstack([A, np.eye(n)])
B, nulldim = gjel(GF2array(A))
inverse = np.int_(B[-n:, -n:])
E = B[:n, :n]
null_vectors = []
if nulldim > 0:
null_vectors = E[:, -nulldim:]
null_vectors[-nulldim:, :] = GF2array(np.eye(nulldim))
null_vectors = np.int_(null_vectors.T)
return inverse, null_vectors
def lightsoutbase(n):
a = np.eye(n*n)
a = np.reshape(a, (n*n, n, n))
a = np.array(list(map(ndimage.binary_dilation, a)))
return np.reshape(a, (n*n, n*n))
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
class LightsOut(object):
def __init__(self, size=5):
self.n = size
self.base = lightsoutbase(self.n)
self.invbase, self.null_vectors = GF2inv(self.base)
def solve(self, b):
b = np.asarray(b)
assert b.shape[0] == b.shape[1] == self.n, "incompatible shape"
if not self.issolvable(b):
raise ValueError("The given setup is not solvable")
first = np.dot(self.invbase, b.ravel()) & 1
solutions = [(first + reduce(add, nvs, 0)) & 1 for nvs in powerset(self.null_vectors)]
final = min(solutions, key=lambda x: x.sum())
return np.reshape(final, (self.n, self.n))
def issolvable(self, b):
b = np.asarray(b)
assert b.shape[0] == b.shape[1] == self.n, "incompatible shape"
b = b.ravel()
p = [np.dot(x, b) & 1 for x in self.null_vectors]
return not any(p)
def text_to_mat(gridtxt, invert=True):
gridlist = [int(s) for s in list(gridtxt)]
shape = np.sqrt(len(gridlist))
if shape%1 != 0:
print("input matrix is not square.")
return 1
shape = int(shape)
matlist = [gridlist[i: i+shape] for i in range(0, len(gridlist), shape)]
mat = np.array(matlist)
if invert:
mat = 1-mat
return mat
def mat_to_text(mat, invert=False):
s = ""
for i in mat:
for j in i:
if invert:
s += str(1-j)
else:
s += str(j)
return s
def text_solver(gridtxt):
    mat_inv = text_to_mat(gridtxt, True)
    if type(mat_inv) == int:
        return 1
    lo = LightsOut(mat_inv.shape[0])  # board size inferred from the input grid
    try:
        bsol = lo.solve(mat_inv)
    except Exception:
        print("Error in determining solution")
        return 1
    return bsol
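# A minimal non-GUI sketch (an illustrative addition; `_demo_solver` is a
# hypothetical helper, not part of the original file): solve a 3x3 board with
# only the center cell set and print which cells to press. Left as an
# uncalled definition so the GUI below is unaffected.
def _demo_solver():
    board = np.array([[0, 0, 0],
                      [0, 1, 0],
                      [0, 0, 0]])
    lo = LightsOut(3)
    print(lo.solve(board))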
master = Tk()
master_gridtxt = StringVar(value="000000000")
master.title("DVa's Puzzle Solver")
master.geometry("400x115")
master.resizable(width=False, height=False)
check_size = 25
check_on = PhotoImage(width=check_size, height=check_size)
check_off = PhotoImage(width=check_size, height=check_size)
check_on.put(("green"), to=(0,0,check_size,check_size))
check_off.put(("red"), to=(0,0,check_size,check_size))
label_text = StringVar()
def update_gridtxt():
b_solve['state'] = NORMAL
master_gridtxt.set("")
for i in range(9):
s = str(globals()[f"b_state{i}"].get())
master_gridtxt.set(master_gridtxt.get() + s)
def reset_boxes():
for i in range(9):
globals()[f"b_state{i}"].set(0)
label_text.set("")
b_solve['state'] = NORMAL
def final_wrapper(gridtxt):
mat = text_solver(gridtxt)
gridtxt_final = mat_to_text(mat)
reset_boxes()
for idx, i in enumerate(gridtxt_final):
if i == "1":
globals()[f"b{idx}"].select()
b_solve['state'] = DISABLED
label_text.set("Solved. Shoot the lamps marked with green boxes.")
for i in range(9):
j = i+1
col = i%3
row = math.ceil(j/3)
globals()[f"b_state{i}"] = IntVar()
globals()[f"b{i}"] = Checkbutton(master, variable=globals()[f"b_state{i}"],
image=check_off, selectimage=check_on, indicatoron=False,
onvalue=1, offvalue=0, command=update_gridtxt)
globals()[f"b{i}"].grid(row=row, column=col, padx=1, pady=1)
b_solve = Button(master, text="Solve", command=lambda:final_wrapper(master_gridtxt.get()), anchor="w")
b_solve.grid(row=1, column=4, padx=1, pady=1, sticky="w")
b_reset = Button(master, text="Reset", command=reset_boxes, anchor="w")
b_reset.grid(row=2, column=4, padx=1, pady=1, sticky="w")
lbl = Label(master, textvariable=label_text, anchor="w")
lbl.grid(row=3, column=4, padx=1, pady=1)
master.mainloop()
|
ThaumielSparrow/switch-solver
|
lights_on.py
|
lights_on.py
|
py
| 6,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.vectorize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.int_",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.int_",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.binary_dilation",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "scipy.ndimage",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "numpy.reshape",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "itertools.combinations",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "numpy.reshape",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 216,
"usage_type": "call"
}
] |
45375013026
|
"""
include packages
"""
from settings import *
import sqlite3
import discord
from discord import app_commands
import sys
import signal
import deepl
from typing import Optional
from lib import vote as vt
from lib import deepl as dl
"""
Global variables
"""
connection : sqlite3.Connection = sqlite3.connect(DATABASE)
intents : discord.Intents = discord.Intents.all()
client : discord.Client = discord.Client(intents=intents)
tree : app_commands.CommandTree = app_commands.CommandTree(client=client)
"""
Setup
"""
vt.init(connection)
with open(DEEPL_API_KEY) as f:
dl_translator = dl.LoggingTranslator(f.read(), connection=connection)
"""
Commands
"""
@tree.command(
name='test',
description='This is a test'
)
@app_commands.describe(
message='Your message',
hello='Hello message'
)
@app_commands.rename(
message='text'
)
@app_commands.choices(
hello=[
app_commands.Choice(name='Good Morning', value='Good Morning'),
app_commands.Choice(name='Good Afternoon', value='Good Afternoon'),
app_commands.Choice(name='Good Evening', value='Good Evening'),
app_commands.Choice(name='Good Night', value='Good Night')
]
)
@app_commands.guild_only
async def test(ctx: discord.Interaction, message: str, hello: str):
await ctx.response.send_message('This is a test message.\nYour message is ...\n'+message+'\n'+hello)
@tree.command(
name='vote',
    description='Run a vote'
)
@app_commands.describe(
    title='Topic of the vote',
    visible='Show which option each person voted for when displaying the results',
)
@app_commands.choices(
visible=[
        app_commands.Choice(name='Show', value='Yes'),
        app_commands.Choice(name='Hide', value='No')
]
)
@app_commands.guild_only
async def vote_with_any_choices(ctx: discord.Interaction, title: str, visible: str='Yes'):
try:
await ctx.response.send_modal(vt.VoteModal(title=title, visible=visible))
except Exception as e:
print(e.with_traceback(sys.exc_info()[2]))
@tree.command(
name='deepl',
    description='Translate text with DeepL (default: Auto→JP)'
)
@app_commands.describe(
    text='Text to translate',
    source_language='Source language (default: auto-detect)',
    target_language='Target language (default: Japanese)'
)
@app_commands.choices(
source_language=dl.DcLanguageList.SOURCE,
target_language=dl.DcLanguageList.TARGET
)
async def deepl_translate(ctx: discord.Interaction, text: str, source_language: Optional[str] = None, target_language: str = deepl.Language.JAPANESE):
try:
if source_language == "":
source_language = None
translated_text = dl_translator.translate_text(
ctx=ctx,
text=text,
source_lang=source_language,
target_lang=target_language
)
t = "> " + text.replace("\n", "\n> ") + "\n"
await ctx.response.send_message(t + translated_text.text)
except Exception as e:
print(e.with_traceback(sys.exc_info()[2]))
"""
Events
"""
@client.event
async def on_ready():
print('Bot is ready')
await tree.sync()
"""
Cleanups
"""
def cleanup():
global connection
connection.close()
def signal_handler(signum, frame):
cleanup()
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGTERM, signal_handler)
try:
with open(TOKEN) as f:
client.run(f.read())
finally:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
cleanup()
|
GrapeJuicer/GrapeBot
|
app/main.py
|
main.py
|
py
| 3,637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlite3.Connection",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "discord.Intents.all",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "discord.Client",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "discord.app_commands.CommandTree",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "discord.app_commands",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "lib.vote.init",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lib.vote",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "lib.deepl.LoggingTranslator",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "lib.deepl",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.rename",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.choices",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.guild_only",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "discord.app_commands",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "lib.vote.VoteModal",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "lib.vote",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "sys.exc_info",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.choices",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.Choice",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.guild_only",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "discord.app_commands",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "discord.Interaction",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "deepl.Language",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "discord.app_commands.describe",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "discord.app_commands.choices",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "discord.app_commands",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "lib.deepl.DcLanguageList",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "lib.deepl",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "lib.deepl.DcLanguageList",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "lib.deepl",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "signal.SIGTERM",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "signal.signal",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "signal.SIGTERM",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "signal.SIG_DFL",
"line_number": 157,
"usage_type": "attribute"
}
] |
18801853357
|
import pytest
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel
from utils.util_log import test_log as log
# customer rg
rg_name_0 = "RG_0"
rg_name_1 = "RG_1"
# coll name
coll_name_1 = "ResourceGroup_111"
coll_name_2 = "ResourceGroup_222"
# resource group info of 4 qns
resource_group_info = [
{"name": rg_name_0, "available_node": 1, "capacity": 1, "loaded_replica": {coll_name_1: 1}},
{"name": rg_name_1, "available_node": 1, "capacity": 1, "loaded_replica": {coll_name_1: 1}},
{"name": ct.default_resource_group_name, "available_node": 2,
"capacity": ct.default_resource_group_capacity, "loaded_replica": {coll_name_2: 2}}
]
class TestChaosRG(TestcaseBase):
""" Test case of end to end"""
def teardown_method(self, method):
log.info(("*" * 35) + " teardown " + ("*" * 35))
log.info("[teardown_method] Start teardown test case %s..." %
method.__name__)
log.info("skip drop collection")
@pytest.mark.tags(CaseLabel.L3)
def test_milvus_resource_group(self):
nb = 10000
# collection rg map
collection_rg_map = {
coll_name_1: {"resource_groups": [rg_name_0, rg_name_1], "replica_number": 2},
coll_name_2: {"resource_groups": [ct.default_resource_group_name], "replica_number": 2}
}
self._connect()
# create RG_0, RG_1, transfer 1 node to RG_0, 1 node to RG_1
for rg_info in resource_group_info:
rg_name = rg_info["name"]
if rg_name != ct.default_resource_group_name:
_, create_rg_res = self.utility_wrap.create_resource_group(rg_name)
assert create_rg_res
log.info(f"[ResourceGroup] Create rg {rg_name} done")
self.utility_wrap.transfer_node(source=ct.default_resource_group_name, target=rg_name,
num_node=rg_info["available_node"])
log.info(
f'[ResourceGroup] Transfer {rg_info["available_node"]} nodes from {ct.default_resource_group_name} to {rg_name} done')
# verify RGs
resource_groups, _ = self.utility_wrap.list_resource_groups()
assert len(resource_groups) == len(resource_group_info)
assert all([rg_info["name"] in resource_groups for rg_info in resource_group_info])
for rg_info in resource_group_info:
rg_info = {"name": rg_info["name"],
"capacity": rg_info["capacity"],
"num_available_node": rg_info["available_node"],
"num_loaded_replica": {},
"num_outgoing_node": {},
"num_incoming_node": {}
}
desc_rg_info, _ = self.utility_wrap.describe_resource_group(name=rg_info["name"],
check_task=ct.CheckTasks.check_rg_property,
check_items=rg_info)
log.info(f'[ResourceGroup] Rg of {rg_info["name"]} info is: {desc_rg_info}')
# prepare collection C1, C2
# create
data = cf.gen_default_dataframe_data(nb=nb)
index_params = {"index_type": "HNSW", "metric_type": "L2", "params": {"M": 48, "efConstruction": 500}}
for coll_name in coll_name_1, coll_name_2:
# create
collection_w = self.init_collection_wrap(name=coll_name, active_trace=True)
log.info(f"create collection {collection_w.name} done")
entities = collection_w.num_entities
# insert
_, res = collection_w.insert(data)
assert res
log.info(f"insert {nb} entities done")
# flush
_, check_result = collection_w.flush(timeout=180)
assert check_result
assert collection_w.num_entities == nb + entities
entities = collection_w.num_entities
log.info(f"flush done with entities: {entities}")
# index
index, _ = collection_w.create_index(field_name=ct.default_float_vec_field_name,
index_params=index_params,
index_name=cf.gen_unique_str())
index, _ = collection_w.create_index(field_name=ct.default_string_field_name,
index_params={},
index_name=cf.gen_unique_str())
index_infos = [index.to_dict() for index in collection_w.indexes]
log.info(f"index info: {index_infos}")
            # load coll_name_1 with 2 replicas -> RG_0, RG_1
            # load coll_name_2 with 2 replicas -> default RG
collection_w.load(replica_number=collection_rg_map[coll_name]["replica_number"],
_resource_groups=collection_rg_map[coll_name]["resource_groups"])
# show query segment info
segment_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
log.info(f"{collection_w.name} segment info: {segment_info}")
# show replicas info
replicas, _ = collection_w.get_replicas()
log.info(f"{collection_w.name} replica info: {replicas}")
# search
search_vectors = cf.gen_vectors(ct.default_nq, ct.default_dim)
search_params = {"metric_type": "L2", "params": {"ef": 64}}
search_res, _ = collection_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
param=search_params, limit=ct.default_limit, expr="int64 >= 0")
assert len(search_res) == ct.default_nq
assert len(search_res[0]) == ct.default_limit
# query and delete
term_expr = f'{ct.default_int64_field_name} < 100'
query_res, _ = collection_w.query(term_expr)
assert len(query_res) == 100
delete_expr = f'{ct.default_int64_field_name} in {[i for i in range(100)]}'
collection_w.delete(delete_expr)
collection_w.query(term_expr, check_task=ct.CheckTasks.check_query_empty)
# verify rg replica info
for rg_info in resource_group_info:
rg_info = {"name": rg_info["name"],
"capacity": rg_info["capacity"],
"num_available_node": rg_info["available_node"],
"num_loaded_replica": rg_info["loaded_replica"],
"num_outgoing_node": {},
"num_incoming_node": {}
}
desc_rg_info_2, _ = self.utility_wrap.describe_resource_group(name=rg_info["name"],
check_task=ct.CheckTasks.check_rg_property,
check_items=rg_info)
log.info(f'[ResourceGroup] Rg of {rg_info["name"]} info is: {desc_rg_info_2}')
@pytest.mark.tags(CaseLabel.L3)
def test_verify_milvus_resource_group(self):
self._connect()
# verify collection exist
all_collections, _ = self.utility_wrap.list_collections()
assert all(coll_name in all_collections for coll_name in [coll_name_1, coll_name_2])
# verify resource groups
for rg_info in resource_group_info:
rg_info = {"name": rg_info["name"],
"capacity": rg_info["capacity"],
"num_available_node": rg_info["available_node"],
"num_loaded_replica": rg_info["loaded_replica"],
"num_outgoing_node": {},
"num_incoming_node": {}
}
desc_rg_info, _ = self.utility_wrap.describe_resource_group(name=rg_info["name"],
check_task=ct.CheckTasks.check_rg_property,
check_items=rg_info)
log.info(f'[ResourceGroup] Rg of {rg_info["name"]} info is: {desc_rg_info}')
# search
for coll_name in coll_name_2, coll_name_1:
# get query segment info
segment, _ = self.utility_wrap.get_query_segment_info(coll_name)
log.info(f"{coll_name} query segment info: {segment}")
# get replicas
collection_w = self.init_collection_wrap(name=coll_name, active_trace=True)
replicas, _ = collection_w.get_replicas(check_task=ct.CheckTasks.check_nothing)
log.info(f"{coll_name} replicas: {replicas}")
# search
for i in range(100):
search_vectors = cf.gen_vectors(ct.default_nq, ct.default_dim)
search_params = {"metric_type": "L2", "params": {"ef": 64}}
search_res, _ = collection_w.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
param=search_params, limit=ct.default_limit, expr="int64 >= 0")
assert len(search_res) == ct.default_nq
assert len(search_res[0]) == ct.default_limit
# show query segment info finally
segment_2, _ = self.utility_wrap.get_query_segment_info(coll_name)
log.info(f"{coll_name} query segment info: {segment_2}")
# show replicas finally
replicas_2, _ = collection_w.get_replicas()
log.info(f"{coll_name} replicas: {replicas_2}")
|
milvus-io/milvus
|
tests/python_client/chaos/testcases/test_chaos_resource_group.py
|
test_chaos_resource_group.py
|
py
| 9,886 |
python
|
en
|
code
| 24,190 |
github-code
|
6
|
[
{
"api_name": "common.common_type.default_resource_group_name",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_resource_group_capacity",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "base.client_base.TestcaseBase",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_resource_group_name",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_resource_group_name",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_resource_group_name",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_resource_group_name",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "common.common_type.CheckTasks",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "common.common_func.gen_default_dataframe_data",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "common.common_func",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_float_vec_field_name",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "common.common_func.gen_unique_str",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "common.common_func",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_string_field_name",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "common.common_func.gen_unique_str",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "common.common_func",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "common.common_func.gen_vectors",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "common.common_func",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_nq",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_dim",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "common.common_type.default_float_vec_field_name",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_limit",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_nq",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_limit",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_int64_field_name",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_int64_field_name",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "common.common_type.CheckTasks",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "common.common_type.CheckTasks",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "pytest.mark.tags",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "common.common_type.CaseLabel.L3",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "common.common_type.CaseLabel",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "common.common_type.CheckTasks",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "common.common_type.CheckTasks",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "common.common_func.gen_vectors",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "common.common_func",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_nq",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_dim",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "common.common_type.default_float_vec_field_name",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_limit",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_nq",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "common.common_type.default_limit",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "common.common_type",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "utils.util_log.test_log.info",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "utils.util_log.test_log",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "pytest.mark.tags",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "common.common_type.CaseLabel.L3",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "common.common_type.CaseLabel",
"line_number": 153,
"usage_type": "name"
}
] |
24091654897
|
from Film import Film
from Forgalmazo import Forgalmazo
import datetime
def fajl_beolvas():
filmek = []
fp = open('nyitohetvege.txt', 'r', encoding='utf-8')
lines = fp.readlines()
fp.close()
for line in lines[1:]:
n_line = line.rstrip()
(eredetiCim, magyarCim,bemutato,forgalmazo,
bevel,latogato) = n_line.split(';')
film = Film(eredetiCim, magyarCim,bemutato,forgalmazo,
bevel,latogato)
filmek.append(film)
return filmek
def feladat3(filmek):
    print('Task 3: Number of films in the file: ', end='')
filmek_szama = len(filmek)
print(filmek_szama, 'db')
def feladat4(filmek):
    print('Task 4: Total 1st-week revenue of the ', end='')
    print('UIP Duna Film distributor: ', end='')
osszeg=0
for film in filmek:
if film.forgalmazo == 'UIP':
osszeg+=int(film.bevel)
print("{:,}".format(osszeg), 'Ft')
def feladat5(filmek):
    print('Task 5: Most visitors in the first week:')
max_film = filmek[0]
for film in filmek:
if int(film.latogato) > int(max_film.latogato):
max_film = film
    print('\tOriginal title:', max_film.eredetiCim)
    print('\tHungarian title:', max_film.magyarCim)
    print('\tDistributor:', max_film.forgalmazo)
    print('\tFirst-week revenue:', max_film.bevel, 'Ft')
    print('\tNumber of visitors:', max_film.latogato, 'people')
def tartalmazTeszt(eredetiCim, magyarCim):
eredetiTartalmaz=False
if 'W' in eredetiCim:
eredetiTartalmaz=True
if 'w' in eredetiCim:
eredetiTartalmaz=True
magyarTartalmazza=False
if 'W' in magyarCim:
magyarTartalmazza=True
if 'w' in magyarCim:
magyarTartalmazza=True
if eredetiTartalmaz and magyarTartalmazza:
return True
else:
return False
def feladat6(filmek):
    print('Task 6: ', end='')
n=len(filmek)
i=0
while (i<n and
not tartalmazTeszt(filmek[i].eredetiCim, filmek[i].magyarCim)):
i+=1
    if i<n:
        print("There was such a film!")
    else:
        print("There was no such film!")
def forgalmazoTeszt(forgalmazok, forgalmazo):
n=len(forgalmazok)
i=0
while i<n and forgalmazok[i].nev != forgalmazo:
i+=1
if i<n:
return True
else:
return False
def feladat7(filmek):
mezonevek = 'forgalmazo;filmekSzama\n'
forgalmazok = []
for film in filmek:
if not forgalmazoTeszt(forgalmazok, film.forgalmazo):
forgalmazo = Forgalmazo(film.forgalmazo)
forgalmazok.append(forgalmazo)
else:
n=len(forgalmazok)
for i in range(0, n):
if forgalmazok[i].nev == film.forgalmazo:
forgalmazok[i].filmek += 1
fp = open('stat.csv', 'w', encoding='utf-8')
fp.write(mezonevek)
for forgalmazo in forgalmazok:
if forgalmazo.filmek>1:
fp.write(forgalmazo.nev + ';' + str(forgalmazo.filmek) + '\n')
fp.close()
def feladat8(filmek):
    print('Task 8: The longest period between two ', end='')
    print('InterCom premieres: ', end='')
elsoBemutato = None
max_kul = 0
for film in filmek:
if film.forgalmazo == 'InterCom':
isoDatum = film.bemutato.replace('.', '-')
if elsoBemutato == None:
elsoBemutato=datetime.date.fromisoformat(isoDatum)
else:
kovBemutato = datetime.date.fromisoformat(isoDatum)
kul = kovBemutato - elsoBemutato
if kul.total_seconds() > max_kul:
max_kul=kul.total_seconds()
elsoBemutato = kovBemutato
nap = max_kul // (24 * 3600)
    print(int(nap), 'days')
filmek = fajl_beolvas()
# ~ feladat3(filmek)
# ~ feladat4(filmek)
# ~ feladat5(filmek)
# ~ feladat6(filmek)
# ~ feladat7(filmek)
feladat8(filmek)
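# A minimal sketch of the date-gap computation used in feladat8 (an
# illustrative addition; `_gap_in_days` is a hypothetical helper, not part of
# the original assignment): subtracting two datetime.date objects yields a
# timedelta, converted here to whole days the same way as above.
def _gap_in_days(iso_a, iso_b):
    a = datetime.date.fromisoformat(iso_a)
    b = datetime.date.fromisoformat(iso_b)
    return int((b - a).total_seconds() // (24 * 3600))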
|
janos01/esti2020Python
|
gyakorlo/Nyito/src/OpeningWeekend.py
|
OpeningWeekend.py
|
py
| 3,940 |
python
|
hu
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "Film.Film",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Forgalmazo.Forgalmazo",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.date.fromisoformat",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.fromisoformat",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 118,
"usage_type": "attribute"
}
] |
71119888509
|
import matplotlib.pyplot as plt
import numpy as np
# ~~~ DEFINE DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
np.random.seed(1337)
n = 1000000
x = np.random.standard_normal(n)
y = x + .5 * np.random.standard_normal(n)
hist, xedges, yedges = np.histogram2d(x, y, bins=100, density=True)
hist[hist == 0] = None  # stored as NaN in the float array, so imshow leaves empty bins blank
t = np.linspace(0, 3 * np.pi, 1000)
style = 'mpl'
# ~~~ PLOT LINEAR ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig, ax = plt.subplots()
plt.plot(t, np.sin(t), t, np.cos(t), t, 2 * np.cos(t))
plt.tight_layout()
plt.savefig(f'gallery/{style}_plot.png')
plt.close()
# legend
fig, ax = plt.subplots()
plt.plot(t, np.sin(t), label='sin')
plt.plot(t, np.cos(t), label='cos')
plt.plot(t, 2 * np.cos(t), label='2cos')
plt.legend(title='function:')
plt.tight_layout()
plt.savefig(f'gallery/{style}_plot_legend.png')
plt.close()
# multiple subplots
fig, axs = plt.subplots(3, 1, sharex=True, gridspec_kw={'hspace': 0.000})
axs[0].plot(t, np.sin(t))
axs[1].plot(t[::20], np.cos(t[::20]), 'o-')
axs[2].plot(t, 2 * np.cos(t), t, np.sin(t))
plt.tight_layout()
plt.savefig(f'gallery/{style}_plot_multiple.png')
plt.close()
# ~~~ PLOT IMSHOW ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fig, ax = plt.subplots()
plt.imshow(hist)
plt.tight_layout()
plt.savefig(f'gallery/{style}_imshow.png')
plt.close()
# cbar
fig, ax = plt.subplots()
im = plt.imshow(hist)
plt.colorbar(im)
plt.tight_layout()
plt.savefig(f'gallery/{style}_imshow_cbar.png')
plt.close()
|
braniii/prettypyplot
|
gallery/comparison_mpl.py
|
comparison_mpl.py
|
py
| 1,505 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "numpy.random.seed",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.standard_normal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.standard_normal",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.histogram2d",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.sin",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.sin",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.cos",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.cos",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numpy.sin",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.colorbar",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
}
] |
19788096058
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
from uuid import uuid4
import pygame
from .clock import Clock, clock
from .keyboard import Keyboard
from .screen import Screen
from .utils.event_dispatcher import EventDispatcher
if TYPE_CHECKING:
from .application import Application
class Scene(EventDispatcher):
"""
The idea and the original code was taken from [EzPyGame](https://github.com/Mahi/EzPyGame)
An isolated scene which can be ran by an application.
Create your own scene by subclassing and overriding any methods.
Example:
```
class Menu(Scene):
def __init__(self):
self.font = pygame.font.Font(...)
def on_enter(self, previous_scene):
self.title = 'Main Menu'
self.resolution = (640, 480)
self.update_rate = 30
def draw(self, screen):
pygame.draw.rect(...)
text = self.font.render(...)
screen.blit(text, ...)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
game_size = self._get_game_size(event.pos)
self.change_scene(Game(game_size))
def _get_game_size(self, mouse_pos_upon_click):
...
class Game(pgz.Scene):
title = 'The Game!'
resolution = (1280, 720)
update_rate = 60
def __init__(self, size):
super().__init__()
self.size = size
self.player = ...
...
def on_enter(self, previous_scene):
super().on_enter(previous_scene)
self.previous_scene = previous_scene
def draw(self, screen):
self.player.draw(screen)
for enemy in self.enemies:
...
def update(self, dt):
self.player.move(dt)
...
if self.player.is_dead():
self.change_scene(self.previous_scene)
elif self.player_won():
self.change_scene(...)
def handle_event(self, event):
... # Player movement etc.
```
The above two classes use different approaches for changing
the application's settings when the scene is entered:
1. Manually set them in `on_enter`, as seen in `Menu`
2. Use class variables, as I did with `Game`
When using class variables (2), you can leave out any setting
(defaults to `None`) to not override that particular setting.
If you override `on_enter` in the subclass, you must call
`super().on_enter(previous_scene)` to use the class variables.
These settings can further be overridden in individual instances:
```
my_scene0 = MyScene()
my_scene0.resolution = (1280, 720)
my_scene1 = MyScene(title='My Second Awesome Scene')
```
Example:
        Shortcuts for event handling when subclassing `Scene`.
```
def on_mouse_up(self, pos, button):
# Override this for easier events handling.
pass
def on_mouse_down(self, pos, button):
# Override this for easier events handling.
pass
def on_mouse_move(self, pos):
# Override this for easier events handling.
pass
def on_key_down(self, key):
# Override this for easier events handling.
pass
def on_key_up(self, key):
# Override this for easier events handling.
pass
```
"""
_title: Optional[str] = None
_resolution: Optional[Tuple[int, int]] = None
_update_rate: Optional[int] = None
def __init__(self, title: Optional[str] = None, resolution=None, update_rate: Optional[int] = None) -> None:
self._application: Optional["Application"] = None
if title is not None:
self._title = title
if resolution is not None:
self._resolution = resolution
if update_rate is not None:
self._update_rate = update_rate
self._keyboard = Keyboard()
        # Client data is the data provided by the client during the handshake: usually things like player name, avatar, etc.
self._client_data: Dict[str, Any] = {}
# The scene UUID is used for communication
self._scene_uuid = str(uuid4())
@property
def scene_uuid(self) -> str:
"""
Get scene UUID.
"""
return self._scene_uuid
def set_client_data(self, client_data: Dict[str, Any]) -> None:
self._client_data = client_data
@property
def client_data(self) -> Dict[str, Any]:
"""
Get data provided by client side.
"""
return self._client_data
def change_scene(self, new_scene: Optional["Scene"]) -> None:
if not self._application:
raise Exception("Application was not configured properly.")
self._application.change_scene(new_scene)
@property
def title(self) -> str:
"""Get application title
Returns:
str: application title
"""
if not self._application:
raise Exception("Application was not configured properly.")
return self._application.title
@title.setter
def title(self, value: str) -> None:
"""Change application title
Args:
value (str): application title to set
"""
if not self._application:
print("Warning: application was not configured - 'title' setting was ignored")
return
self._application.title = value
@property
def resolution(self) -> Tuple[int, int]:
"""Get application screen resolution
Returns:
Tuple[int, int]: application screen resolution
"""
if not self._application:
raise Exception("Application was not configured properly.")
return self._application.resolution
@resolution.setter
def resolution(self, value: Tuple[int, int]) -> None:
"""Change application screen resolution
Args:
value (Tuple[int, int]): application screen resolution to use
"""
if not self._application:
print("Warning: application was not configured - 'resolution' setting was ignored")
return
self._application.resolution = value
@property
def update_rate(self) -> int:
"""Get application update rate
Returns:
int: application update rate
"""
if not self._application:
raise Exception("Application was not configured properly.")
return self._application.update_rate
@update_rate.setter
def update_rate(self, value: int) -> None:
"""Change application update rate
Args:
value (int): application update rate to set
"""
if not self._application:
print("Warning: application was not configured - 'update_rate' setting was ignored")
return
self._application.update_rate = value
@property
def clock(self) -> Clock:
"""
Get `Clock` object.
Actually returns the global clock object.
Returns:
Clock: clock object
"""
return clock
@property
def keyboard(self) -> Keyboard:
"""
Get `Keyboard` object.
Returns:
Keyboard: keyboard object
"""
return self._keyboard
def draw(self, screen: Screen) -> None:
"""
Override this with the scene drawing.
Args:
screen (Screen): screen to draw the scene on
"""
def update(self, dt: float) -> None:
"""
Override this with the scene update tick.
Args:
dt (float): time in milliseconds since the last update
"""
def handle_event(self, event: pygame.event.Event) -> None:
"""
Override this to handle an event in the scene.
All of `pygame`'s events are sent here, so filtering
should be applied manually in the subclass.
Args:
event (pygame.event.Event): event to handle
"""
if event.type == pygame.KEYDOWN:
self._keyboard._press(event.key)
elif event.type == pygame.KEYUP:
self._keyboard._release(event.key)
def on_enter(self, previous_scene: Optional["Scene"]) -> None:
"""
Override this to initialize upon scene entering.
If you override this method and want to use class variables
to change the application's settings, you must call
``super().on_enter(previous_scene)`` in the subclass.
Args:
            previous_scene (Optional[Scene]): the previously running scene
"""
for attr in ("_title", "_resolution", "_update_rate"):
value = getattr(self, attr)
if value is not None:
if self._application is None:
print(f"Warning: application was not configured - '{attr}' setting was ignored")
continue
setattr(self._application, attr.lower(), value)
# Set event dispatcher
self.load_handlers()
def on_exit(self, next_scene: Optional["Scene"]) -> None:
"""
Override this to deinitialize upon scene exiting.
Args:
next_scene (Optional[Scene]): next scene to run
"""
|
kdeyev/pgz
|
pgz/scene.py
|
scene.py
|
py
| 9,429 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "utils.event_dispatcher.EventDispatcher",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "keyboard.Keyboard",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "clock.clock",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "clock.Clock",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "keyboard.Keyboard",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "screen.Screen",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "pygame.event",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYUP",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 314,
"usage_type": "name"
}
] |
20519832620
|
"""!
@brief CCORE Wrapper for X-Means algorithm.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
from ctypes import c_double, c_longlong, c_size_t, c_uint, POINTER
from pyclustering.core.wrapper import ccore_library
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder
def xmeans(sample, centers, kmax, tolerance, criterion, alpha, beta, repeat, random_state, metric_pointer):
random_state = random_state or -1
pointer_data = package_builder(sample, c_double).create()
pointer_centers = package_builder(centers, c_double).create()
ccore = ccore_library.get()
ccore.xmeans_algorithm.restype = POINTER(pyclustering_package)
package = ccore.xmeans_algorithm(pointer_data, pointer_centers, c_size_t(kmax), c_double(tolerance),
c_uint(criterion), c_double(alpha), c_double(beta), c_size_t(repeat),
c_longlong(random_state), metric_pointer)
result = package_extractor(package).extract()
ccore.free_pyclustering_package(package)
return result
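# --- Usage sketch (illustrative; not part of the wrapper) ---
# The sample points, parameter values, criterion code, and NULL metric pointer
# below are assumptions; the compiled CCORE library must be available for the call to work.
# sample  = [[0.1, 0.2], [0.15, 0.25], [5.0, 5.1], [5.2, 5.0]]
# centers = [[0.1, 0.2], [5.0, 5.0]]
# result = xmeans(sample, centers, kmax=20, tolerance=0.025, criterion=0,
#                 alpha=0.9, beta=0.9, repeat=1, random_state=None, metric_pointer=None)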
|
annoviko/pyclustering
|
pyclustering/core/xmeans_wrapper.py
|
xmeans_wrapper.py
|
py
| 1,207 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
[
{
"api_name": "pyclustering.core.pyclustering_package.package_builder",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ctypes.c_double",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "pyclustering.core.pyclustering_package.package_builder",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ctypes.c_double",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "pyclustering.core.wrapper.ccore_library.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyclustering.core.wrapper.ccore_library",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "ctypes.POINTER",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyclustering.core.pyclustering_package.pyclustering_package",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "ctypes.c_size_t",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ctypes.c_double",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "ctypes.c_uint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ctypes.c_double",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ctypes.c_size_t",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "ctypes.c_longlong",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyclustering.core.pyclustering_package.package_extractor",
"line_number": 30,
"usage_type": "call"
}
] |
7176111759
|
#!/usr/bin/env python3
import os
import sys
import re
from pathlib import Path
def _find_files(project_root):
path_exclude_pattern = r"\.git($|\/)|venv|_build|\.tox"
file_exclude_pattern = r"fill_template_vars\.py|\.swp$"
filepaths = []
for dir_path, _dir_names, file_names in os.walk(project_root):
if not re.search(path_exclude_pattern, dir_path):
for file in file_names:
if not re.search(file_exclude_pattern, file):
filepaths.append(str(Path(dir_path, file)))
return filepaths
def _replace(pattern, replacement, project_root):
print(f"Replacing values: {pattern}")
for file in _find_files(project_root):
try:
with open(file) as f:
content = f.read()
content = re.sub(pattern, replacement, content)
with open(file, "w") as f:
f.write(content)
except UnicodeDecodeError:
pass
def main():
project_root = Path(os.path.realpath(sys.argv[0])).parent.parent
module_name = input("What is your python module name (ex: What would you import (no dashes)? ")
pypi_input = input(f"What is your pypi package name? (default: {module_name}) ")
pypi_name = pypi_input or module_name
repo_input = input(f"What is your github project name? (default: {pypi_name}) ")
repo_name = repo_input or pypi_name
rtd_input = input(
f"What is your readthedocs.org project name? (default: {pypi_name}) "
)
rtd_name = rtd_input or pypi_name
project_input = input(
f"What is your project name (ex: at the top of the README)? (default: {repo_name}) "
)
project_name = project_input or repo_name
short_description = input("What is a one-liner describing the project? ")
_replace("<MODULE_NAME>", module_name, project_root)
_replace("<PYPI_NAME>", pypi_name, project_root)
_replace("<REPO_NAME>", repo_name, project_root)
_replace("<RTD_NAME>", rtd_name, project_root)
_replace("<PROJECT_NAME>", project_name, project_root)
_replace("<SHORT_DESCRIPTION>", short_description, project_root)
os.makedirs(project_root / module_name, exist_ok=True)
Path(project_root / module_name / "__init__.py").touch()
Path(project_root / module_name / "py.typed").touch()
if __name__ == "__main__":
main()
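# --- Illustrative sketch (not part of the template script) ---
# _replace() rewrites placeholder tokens in every non-excluded file; the sample
# text and replacement value below are assumptions.
# re.sub("<MODULE_NAME>", "mypkg", "import <MODULE_NAME>")  ->  "import mypkg"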
|
ethereum/py-evm
|
.project-template/fill_template_vars.py
|
fill_template_vars.py
|
py
| 2,362 |
python
|
en
|
code
| 2,109 |
github-code
|
6
|
[
{
"api_name": "os.walk",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.realpath",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 67,
"usage_type": "call"
}
] |
44083632675
|
import gzip
import inspect
import os
from io import StringIO
from typing import Optional, List
import numpy as np
import pandas as pd
def time_map(time_a: float, time_b: float, packet_a: int, packet_b: int, time_c: float, window_tolerance: int = 0) -> \
Optional[float]:
"""
Map an API time into a packet number.
This function exists to support a clean visualisation.
The window tolerance is used to capture nearby calls
========= |
ta tb tc=>pc
pa pb
Args:
time_a (float): time of the beginning of the flow (in seconds since epoch, time.time())
time_b (float): time of the ending of the flow
packet_a (int): packet number of the beginning of the flow
packet_b (int): packet number of the ending of the flow
time_c (float): time of the api call
window_tolerance (int): time shift in seconds in which api calls are still considered to belong to the flow
Returns:
float: packet number; None if the mapping fails
"""
window_tolerance *= 1000000
# Check if inside flow
if time_a <= time_c <= time_b: # Chained comparison
return packet_a + (time_c - time_a) * (packet_b - packet_a) / (time_b - time_a)
# Check if in the window border (simple)
if time_a - window_tolerance <= time_c <= time_b:
return packet_a
"""if time_a <= time_c <= time_b + window_tolerance:
return packet_b"""
# Outside
return None
def pure_time_map(time_a: float, time_b: float, time_c: float, window_tolerance: int = 0) -> Optional[float]:
"""
Map an API time into a packet number.
This function exists to support a clean visualisation.
The window tolerance is used to capture nearby calls
========= |
ta tb tc=>pc
Args:
time_a (float): time of the beginning of the flow (in seconds since epoch, time.time())
time_b (float): time of the ending of the flow
time_c (float): time of the api call
window_tolerance (int): time shift in seconds in which api calls are still considered to belong to the flow
Returns:
float: time; None if the mapping fails
"""
window_tolerance *= 1000000
# Check if inside flow
if time_a <= time_c <= time_b: # Chained comparison
return time_c
# Check if in the window border (simple)
if time_a - window_tolerance <= time_c <= time_b:
return time_a
"""if time_a <= time_c <= time_b + window_tolerance:
return time_b"""
# Outside
return None
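# --- Worked example (illustrative values; real timestamps are microsecond epochs) ---
# A flow from t=100 to t=200 spanning packets 1..11 maps a call at t=150 to
# packet 6; with a tolerance window, a call shortly before the flow start is
# clamped to packet_a.
assert time_map(100, 200, 1, 11, 150) == 6.0
assert time_map(100, 200, 1, 11, 50, window_tolerance=1) == 1
assert pure_time_map(100, 200, 150) == 150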
def get_child_pids(current_pid: int, diff: pd.DataFrame) -> List:
"""
Get the child process pid of one process given its pid
Args:
current_pid (int): parent pid
diff (pd dataframe): df recorded
Returns:
list: list of pids
"""
childs = diff[diff["parent_pid"] == current_pid]
if childs.shape[0] == 0:
return [current_pid]
return [current_pid] + [v for index, row in childs.iterrows() for v in get_child_pids(row["process_id"], diff)]
def get_malware_pids(malware_name: str = "2020-09-30-Trickbot-EXE.exe", path: str = "./") -> List:
"""
Get the pids of all the malware generated processes
Args:
malware_name (str, optional): name of the malware. Defaults to "2020-09-08-Trickbot-EXE-gtag-ono72.exe".
path (str, optional): path of the malware artifacts. Defaults to "./".
Returns:
list: list of pids
"""
first = pd.read_csv(path + "process_pre.csv")
post = pd.read_csv(path + "process_post.csv")
first.drop(first.columns[0], axis=1, inplace=True)
post.drop(post.columns[0], axis=1, inplace=True)
diff = first.merge(post, indicator=True,
how='right').loc[lambda x: x['_merge'] != 'both']
try:
malware_pid = int(diff.loc[diff['process_name'] == malware_name]["process_id"].astype(float))
except TypeError:
raise RuntimeError('Malware PID not found, check malware name')
return get_child_pids(malware_pid, diff)
def gzip_to_string(file_path: str) -> str:
"""
Open a gzip file and load the content in a string
This function exists because the gzip may not be properly closed.
In this case, the end is corrupted but the rest can be read.
Args:
file_path (string): path of the gzip file
Returns:
str: content of the gzip file
"""
gzip_file = gzip.open(file_path, "rt")
string = ""
while True:
try:
line = gzip_file.readline()
except EOFError:
# Truncated gzip: keep whatever was readable
break
if not line: # Normal end of file
break
string += line
gzip_file.close()
return string
def get_malware_traces(path: str = "./") -> List:
"""
Get a list of dataframe representing the frida trace
Its current format is [time, api_name, category]
Returns:
list: list of dataframe
"""
pids = list(set(get_malware_pids(path=path)))
traces = []
for pid in pids:
if not os.path.isfile(f"{path}frida_{pid}.txt.gz"):
print("Trace for {pid} does not exist")
header = ["time", "api", "category"]
frida_str = StringIO(gzip_to_string(f"{path}frida_{pid}.txt.gz"))
dataframe = pd.read_csv(frida_str, names=header)
dataframe.drop(
dataframe.loc[dataframe['api'] == 'error'].index, inplace=True)
dataframe.drop_duplicates(
subset=['time', 'api'], keep='first', inplace=True)
dataframe.reset_index(drop=True, inplace=True)
traces.append(dataframe)
dataframe["time_int"] = (dataframe["time"] * 1000000).astype(int)
return traces
class Singleton(type):
# Singleton modified to handle arguments (singleton for each argument set)
_instances = {}
_init = {}
def __init__(cls, name, bases, dct):
cls._init[cls] = dct.get('__init__', None)
def __call__(cls, *args, **kwargs):
init = cls._init[cls]
if init is not None:
key = (cls, frozenset(inspect.getcallargs(init, None, *args, **kwargs).items()))
else:
key = cls
if key not in cls._instances:
cls._instances[key] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[key]
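# --- Usage sketch for the argument-aware Singleton (illustrative) ---
# Identical constructor arguments return the same object; different arguments
# yield distinct instances. The _Config class is an assumption.
class _Config(metaclass=Singleton):
    def __init__(self, name: str):
        self.name = name

assert _Config("a") is _Config("a")
assert _Config("a") is not _Config("b")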
class MalwareTraceExtractor(metaclass=Singleton):
def __init__(self, malware_name: str, path: str):
self.malware_name = malware_name
self.path = path
self.trace_array = None
self._get_trace()
def _get_trace(self) -> None:
pids = list(set(get_malware_pids(self.malware_name, path=self.path)))
return_array = np.empty((0, 4))
for pid in pids:
if not os.path.isfile(f"{self.path}frida_{pid}.txt.gz"):
# print(f"Trace for {pid} does not exist")
continue
header = ["time", "api", "category"]
frida_str = StringIO(gzip_to_string(f"{self.path}frida_{pid}.txt.gz"))
dataframe = pd.read_csv(frida_str, names=header)
dataframe.drop(
dataframe.loc[dataframe['api'] == 'error'].index, inplace=True)
dataframe.drop_duplicates(
subset=['time', 'api'], keep='first', inplace=True)
dataframe.reset_index(drop=True, inplace=True)
dataframe["time_int"] = (dataframe["time"] * 1000000).astype(np.int64)
# To numpy
np_array_df = dataframe.to_numpy()
# Stack to the final array
return_array = np.vstack((return_array, np_array_df))
self.trace_array = return_array
print(len(self.trace_array))
def get_merge_trace(self) -> np.ndarray:
return self.trace_array
def get_segmented_flow_syscalls(segmented_flow: np.ndarray, malware_process_name: str, path: str = "./",
time_delay_allowed: int = 0) -> np.ndarray:
"""
Get a list of API calls corresponding to the segmented flow
:param segmented_flow:
:param malware_process_name:
:param path:
:param time_delay_allowed:
:return:
"""
min_time, max_time = segmented_flow[0][2]
for group in segmented_flow[1:]:
# ['HANDSHAKE', [0, 2], [1612708961378936, 1612708961422139]]
timea, timeb = group[2]
min_time = min(timea, min_time)
max_time = max(timeb, max_time)
# print(min_time, max_time, max_time - min_time)
trace_extractor = MalwareTraceExtractor(malware_name=malware_process_name, path=path)
calls = trace_extractor.get_merge_trace()
# calls = get_malware_traces_merged(malware_process_name, path=path)
returned_calls = np.empty((0, 4))
for call in calls:
mapping = pure_time_map(min_time, max_time, call[3], time_delay_allowed)
if mapping is not None:
returned_calls = np.vstack((returned_calls, call))
return returned_calls
if __name__ == "__main__":
PATH = "trickbot1_1/"
|
llmhyy/malware-traffic
|
Experiments/exp16_visualisation/api_extraction.py
|
api_extraction.py
|
py
| 7,970 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "gzip.open",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "inspect.getcallargs",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "numpy.vstack",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 227,
"usage_type": "attribute"
}
] |
39626332335
|
import numpy as np
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
return render_template("index.html")
@app.route('/predict',methods=['POST'])
def predict():
label = ""
sepallength = request.form["sepallength"]
sepalwidth = request.form["sepalwidth"]
petallength = request.form["petallength"]
petalwidth = request.form["petalwidth"]
int_features = [float(sepallength), float(sepalwidth), float(petallength), float(petalwidth)]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)[0]
if prediction == 0 :
label = "Iris-virginica"
elif prediction == 1:
label = "Iris-versicolor"
else:
label = "Iris-setosa"
return render_template('index.html', prediction_text='Predicted Flower should be {}'.format(label))
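# --- Usage sketch (illustrative; not part of the original app) ---
# Exercises /predict with Flask's built-in test client; the feature values are
# assumptions, and model.pkl must exist for the import above to succeed.
def _demo_predict():
    with app.test_client() as client:
        resp = client.post('/predict', data={'sepallength': '5.1', 'sepalwidth': '3.5',
                                             'petallength': '1.4', 'petalwidth': '0.2'})
        print(resp.status_code, resp.data[:80])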
if __name__ == "__main__":
app.run(debug=True)
|
Karthicksaga/IRIS
|
app.py
|
app.py
|
py
| 938 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 30,
"usage_type": "call"
}
] |
10422721293
|
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, TypeVar
if TYPE_CHECKING:
from collections.abc import Iterator
T = TypeVar("T", bound=Enum)
def iterate_enum(enum_class: type[T]) -> Iterator[T]:
assert issubclass(enum_class, Enum)
yield from enum_class
def add_long_name(enum_class: type[T], names: dict[T, str]) -> None:
add_per_enum_field(enum_class, "long_name", names)
def add_per_enum_field(enum_class: type[T], field_name: str, names: dict[T, Any]) -> None:
if set(enum_class) != set(names.keys()):
raise ValueError(f"{field_name} for {enum_class} are not synchronized")
for key, value in names.items():
setattr(key, field_name, value)
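# --- Usage sketch (illustrative; _Color and its long names are assumptions) ---
class _Color(Enum):
    RED = 1
    BLUE = 2

add_long_name(_Color, {_Color.RED: "Deep Red", _Color.BLUE: "Sky Blue"})
assert _Color.RED.long_name == "Deep Red"  # type: ignore[attr-defined]
assert list(iterate_enum(_Color)) == [_Color.RED, _Color.BLUE]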
|
randovania/randovania
|
randovania/lib/enum_lib.py
|
enum_lib.py
|
py
| 739 |
python
|
en
|
code
| 165 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "collections.abc.Iterator",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 21,
"usage_type": "name"
}
] |
32043413373
|
import getpass
import datetime
import urllib, urllib.request
import os, sys
from random import randint
from shutil import copyfileobj
from html.parser import HTMLParser
#Time, Right Now.
now = datetime.datetime.now()
#Get local username
UserName = getpass.getuser()
#Define End-Of-Script Quit-Action
def quitting_time():
print()
print()
print("Your Precious Cargo Is On Your Desktop, Inside The Folder 'ThisAmericanLife'.")
print("I Now Retire To My Humble Abode.")
print("Thank You, User, For This Opportunity.")
input("Press ENTER To End Program.")
sys.exit(0)
#Change download directory to "ThisAmericanLife" on User's Desktop
if not os.access('/home/' + UserName + '/Desktop/ThisAmericanLife/', os.F_OK):
os.mkdir('/home/' + UserName + '/Desktop/ThisAmericanLife/')
os.chdir('/home/'+ UserName +'/Desktop/ThisAmericanLife/')
#Required for parsing and stripping HTML data
class MLStripper(HTMLParser): #Supports the stripping of tags from "straight html"
def __init__(self):
super().__init__()
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
#This strips html tags from html; input must be "straight html"
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
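#Illustrative example (not part of the original script):
#strip_tags('<p>Hello <b>World</b>!</p>') returns 'Hello World!'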
#Define asking User what s/he wants to do
def what_to_do (retries=4, complaint='Answer With A Number Between 1 and 5, Please'):
Option_01 = set(['1', '01', 'one'])
Option_02 = set(['2','02','two'])
Option_03 = set(['3','03','three'])
Option_04 = set(['4','04','four'])
Option_05 = set(['5','05','five'])
while True:
we_are_doing = input("Answer With Numbers 1 thru 5 >> ").lower()
if we_are_doing in Option_01:
print("We'll now download one episode of your choice.")
print()
One_OneEpisode()
if we_are_doing in Option_02:
print("We'll now download your choice of a block of episodes.")
print()
Two_EpisodeBlock()
if we_are_doing in Option_03:
print("We'll now download a smattering of episodes of your choice.")
print()
Three_ScatteredEpisodes()
if we_are_doing in Option_04:
print("We'll now download episodes from your choice to the current episode.")
print()
Four_EpiChoiceToCurrent()
if we_are_doing in Option_05:
print("We'll now download five random episodes for you.")
print()
Five_5RandomEpis()
retries = retries - 1
if retries < 0:
print("You Are Incapable Of Following Instructions.")
print("I'm Done Trying To Help You.")
input("Press ENTER To Quit, As I Have.")
sys.exit(0)
print(complaint)
####### Subroutine: Get Latest Episode Number #######
def get_latest_episode_number():
#Note: this subroutine creates and deletes a temporary txt file, "TAL_Archive_HTML.txt"
###Global Variables###
#this is required because we need to know what the latest episode number is Everywhere
global LatestEpisodeNumber
###Global Variables###
#Get the HTML source code from T.A.L.'s Archive URL
website = urllib.request.urlopen("http://www.thisamericanlife.org/radio-archives").read()
#Save Website to risk_reward.txt [file will be removed when the data is retrieved]
strip_write = open('/home/' + UserName + '/Desktop/ThisAmericanLife/TAL_Archive_HTML.txt', 'w')
strip_write.write(strip_tags(website.decode('utf-8')))
strip_write.close()
#Search through HTML-stripped source data for the latest episode, keying off the first instance of "Share"
with open('/home/' + UserName + '/Desktop/ThisAmericanLife/TAL_Archive_HTML.txt', 'r+') as TAL_Archive_HTML:
for line in TAL_Archive_HTML:
if "Share" in line:
#Assign the latest episode's information [number, title, date] without leading spaces to variable CurrentEpisodeLineInfo
CurrentEpisodeLineInfo = TAL_Archive_HTML.__next__().lstrip()
#Assign the last 10 characters of space-stripped CurrentEpisodeLineInfo to LatestEpisodeDate [DD.MM.YYYY]
LatestEpisodeDate = CurrentEpisodeLineInfo[-11:-1]
#Check if the latest queued episode is available today, keying off date information
if now.strftime("%m.%d.%Y") < LatestEpisodeDate:
LatestEpisodeNumber = int(CurrentEpisodeLineInfo[0:3]) - 1
elif now.strftime("%m.%d.%Y") == LatestEpisodeDate:
#We need to find out when today's episode is released for download... today, or tomorrow.
#GetTheTodayActionStarted
LatestEpisodeNumber = int(CurrentEpisodeLineInfo[0:3]) - 1
pass
elif now.strftime("%m.%d.%Y") > LatestEpisodeDate:
LatestEpisodeNumber = CurrentEpisodeLineInfo[0:3]
else:
print()
print()
print("Call The Doctor.")
print("I Now Hide Behind Cpt. Jack Harkness For Safety.")
input("Press ENTER To Escape Your Doom.")
sys.exit(0)
break
print("The latest episode in the queue is " + CurrentEpisodeLineInfo)
print()
print("The latest available episode is Episode #" + str(LatestEpisodeNumber))
print()
#Remove the temporary txt file TAL_Archive_HTML.txt
os.remove("/home/" + UserName + "/Desktop/ThisAmericanLife/TAL_Archive_HTML.txt")
####### Subroutine: Get Episode Number From User #######
def get_episode_number_from_user():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
#This is required in case User enters "0" for the episode number
global nakednumber
###Global Variables###
number = input("Give Me An Episode Number >> ")
print()
nakednumber = number.lstrip("0")
if nakednumber == "":
print("You told me to download 'Episode 0', which does not exist.")
get_episode_number_from_user()
else:
pass
EpisodeNumber = int(nakednumber)
if(EpisodeNumber >= 1):
pass
else:
print("You didn't give me a positive whole number.")
get_episode_number_from_user()
####### Subroutine: Generate Random Episode Number #######
def generate_random_episode_number():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
RandomEpisodeNumber = randint(1,int(LatestEpisodeNumber)) #Episode numbering starts at 1
EpisodeNumber = RandomEpisodeNumber
####### Subroutine: Check Number Is Valid #######
def check_epi_number_validity():
if EpisodeNumber > int(LatestEpisodeNumber):
while (EpisodeNumber > int(LatestEpisodeNumber)):
print("The episode number you have chosen is in the Future.")
print()
get_episode_number_from_user()
else:
pass
####### Subroutine: Download The Episode #######
def download_the_episode():
###Global Variables###
#this is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
mp3 = str(EpisodeNumber) + ".mp3"
with urllib.request.urlopen(("http://audio.thisamericanlife.org/" + str(EpisodeNumber) + "/" + str(EpisodeNumber) + ".mp3")) as in_stream, open(mp3, 'wb') as out_file:
copyfileobj(in_stream, out_file)
print()
print("I have finished downloading episode #" + str(EpisodeNumber) + " of This American Life.")
print()
##################################################
##########Executing The User's Options############
##################################################
####### Download One Episode #######
def One_OneEpisode():
get_latest_episode_number()
get_episode_number_from_user()
check_epi_number_validity()
download_the_episode()
quitting_time()
####### Download A Choice Block Of Episodes #######
def Two_EpisodeBlock():
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
get_latest_episode_number()
#Get the first border episode number from User
print("What episode number is at the beginning of this block of episodes?")
get_episode_number_from_user()
check_epi_number_validity()
FirstNumber = EpisodeNumber
#Get the second border episode number from User
print("What episode number is at the end of this block of episodes?")
get_episode_number_from_user()
check_epi_number_validity()
SecondNumber = EpisodeNumber
#A list of the border episode numbers
border_episodes = [FirstNumber,SecondNumber]
#Find and establish which inputted episode number has the larger value
HigherEpisodeNumber = max(border_episodes)
#Find and establish which inputted episode number has the smaller value
LowerEpisodeNumber = min(border_episodes)
#Edge case: a "block" of a single episode
#We are going to use EpisodeNumber to download, and admonish the User
if FirstNumber == SecondNumber:
print()
print("You should have chosen Option #1: 'Download One Episode of your choice'.")
print("I don't want to out of principle, but to be nice I shall help you anyway.")
print()
download_the_episode()
quitting_time()
else:
pass
#Calculate how many episodes to download
DownloadCycles = int(HigherEpisodeNumber) - int(LowerEpisodeNumber) + 1
#Prime the EpisodeNumber variable for looping
EpisodeNumber = int(LowerEpisodeNumber)
#Download those episodes!
for n in range(0,DownloadCycles):
download_the_episode()
EpisodeNumber = EpisodeNumber + 1
quitting_time()
####### Download Scattered Episodes #######
def Three_ScatteredEpisodes():
get_latest_episode_number()
HowManyEpisodes = input("How Many Episodes Would You Like To Download? >> ")
if(int(HowManyEpisodes) >= 1):
pass
else:
while (int(HowManyEpisodes) < 1):
print()
print("You didn't give me a counting number.")
HowManyEpisodes = input("How Many Episodes Would You Like To Download? >> ")
print()
if int(HowManyEpisodes) > (int(LatestEpisodeNumber) + 1):
while (int(HowManyEpisodes) > (int(LatestEpisodeNumber) + 1)):
print()
print("There are not that many episodes to download at this time.")
print()
print("There are " + str(LatestEpisodeNumber) + " available to download at this time.")
print()
HowManyEpisodes = input("How Many Episodes Would You Like To Download? >> ")
print()
for n in range(0,int(HowManyEpisodes)):
get_episode_number_from_user()
check_epi_number_validity()
download_the_episode()
quitting_time()
####### Download Choice To Latest Available #######
def Four_EpiChoiceToCurrent():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
get_latest_episode_number()
print("I need to know what episode you want to start with.")
get_episode_number_from_user()
check_epi_number_validity()
#Calculate how many episodes to download
DownloadCycles = int(LatestEpisodeNumber) - EpisodeNumber + 1
#Download those episodes!
for n in range(0,DownloadCycles):
download_the_episode()
EpisodeNumber = EpisodeNumber +1
quitting_time()
####### Download Five Random Episodes #######
def Five_5RandomEpis():
###Global Variables###
#This is required because we need to know the episode number Everywhere
global EpisodeNumber
###Global Variables###
get_latest_episode_number()
for n in range(0,5):
EpisodeNumber = randint(1,int(LatestEpisodeNumber))
download_the_episode()
quitting_time()
##################################################
############### Kick Off The Script ##############
##################################################
#Print-to-Screen Introduction
print('========== =========== ==========')
print("Hello " + UserName + "!")
print("The time is", now.strftime("%Y-%m-%d %H:%M")) #Print to Terminal time, time right now;
print("Let's Download Some Episodes of 'This American Life'.")
print('========== =========== ==========')
#Prompt User On What To Do
print("What Type Of Downloading Would We Like To Do? ")
print()
print("Option 1: One [1] episode of your choice.")
print("Option 2: A continuous block of episodes, your choice.")
print("Option 3: A discontinuous block of episodes, your choice.")
print("Option 4: All episodes between your choice and the current episode [inclusive].")
print("Option 5: Five [5] random episodes.")
print()
what_to_do()
#EndFile.
|
milesnielsen/DownloadEpisodesTAL
|
TAL_Epi_Download.py
|
TAL_Epi_Download.py
|
py
| 13,624 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "getpass.getuser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.access",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.F_OK",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "html.parser.HTMLParser",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "html.parser",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "sys.exit",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfileobj",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 380,
"usage_type": "call"
}
] |
19209409927
|
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Dictionary mapping word contractions to their full words
contractions = {
"ain't": "are not","'s":" is","aren't": "are not",
"can't": "cannot","can't've": "cannot have",
"'cause": "because","could've": "could have","couldn't": "could not",
"couldn't've": "could not have", "didn't": "did not","doesn't": "does not",
"don't": "do not","hadn't": "had not","hadn't've": "had not have",
"hasn't": "has not","haven't": "have not","he'd": "he would",
"he'd've": "he would have","he'll": "he will", "he'll've": "he will have",
"how'd": "how did","how'd'y": "how do you","how'll": "how will",
"I'd": "I would", "I'd've": "I would have","I'll": "I will",
"I'll've": "I will have","I'm": "I am","I've": "I have", "isn't": "is not",
"it'd": "it would","it'd've": "it would have","it'll": "it will",
"it'll've": "it will have", "let's": "let us","ma'am": "madam",
"mayn't": "may not","might've": "might have","mightn't": "might not",
"mightn't've": "might not have","must've": "must have","mustn't": "must not",
"mustn't've": "must not have", "needn't": "need not",
"needn't've": "need not have","o'clock": "of the clock","oughtn't": "ought not",
"oughtn't've": "ought not have","shan't": "shall not","sha'n't": "shall not",
"shan't've": "shall not have","she'd": "she would","she'd've": "she would have",
"she'll": "she will", "she'll've": "she will have","should've": "should have",
"shouldn't": "should not", "shouldn't've": "should not have","so've": "so have",
"that'd": "that would","that'd've": "that would have", "there'd": "there would",
"there'd've": "there would have", "they'd": "they would",
"they'd've": "they would have","they'll": "they will",
"they'll've": "they will have", "they're": "they are","they've": "they have",
"to've": "to have","wasn't": "was not","we'd": "we would",
"we'd've": "we would have","we'll": "we will","we'll've": "we will have",
"we're": "we are","we've": "we have", "weren't": "were not","what'll": "what will",
"what'll've": "what will have","what're": "what are", "what've": "what have",
"when've": "when have","where'd": "where did", "where've": "where have",
"who'll": "who will","who'll've": "who will have","who've": "who have",
"why've": "why have","will've": "will have","won't": "will not",
"won't've": "will not have", "would've": "would have","wouldn't": "would not",
"wouldn't've": "would not have","y'all": "you all", "y'all'd": "you all would",
"y'all'd've": "you all would have","y'all're": "you all are",
"y'all've": "you all have", "you'd": "you would","you'd've": "you would have",
"you'll": "you will","you'll've": "you will have", "you're": "you are",
"you've": "you have"
}
STOPWORDS = stopwords.words('english')
meaningless_words = ['hotel','stay','hilton','location','room','service','airport','staff','london','night','flight','overnight','rooms', 'experience','gatwick','ever','holiday','one', 'stayed','would','breakfast','bed','check','get','us','time','reception','terminal','bar','food','booked','walk','bathroom', 'really','early','could','also','restaurant','morning','even','floor','next','back','day','two', 'got','executive','south','shower','first','long','need','area', 'minutes','lounge','went','much','told','sleep', 'arrived','hotels','work','station','nights','beds', 'quite','bit','go','people','car']
for word in meaningless_words:
STOPWORDS.append(word)
# Remove punctuation marks, stopwords, emojis, urls, convert to lowercase, expand contractions, hashtags, retweet
def preprocess_review(review):
res_review = []
lemmatizer = WordNetLemmatizer()
for word in review.split():
# Convert to lowercase
word = word.lower()
# Expand Contractions
word = contractions.get(word, word)
for w in word.split(" "):
# Remove stopwords
if w not in STOPWORDS:
# w = splitter.split(w)
# Remove punctuation
w = re.sub(r'[^\w\s]', '', str(w))
# Remove numbers
w = re.sub(r'\d+', '', w)
# Lemmatize the word
w = lemmatizer.lemmatize(w, pos='v')
if w != '':
res_review.append(w)
return ' '.join([word for word in res_review])
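# --- Illustrative example (not part of the original module; requires the NLTK
# stopwords and wordnet corpora) ---
# preprocess_review("I can't believe the Rooms were so noisy!") yields something
# like "cannot believe noisy": "can't" expands to "cannot", stopwords and the
# domain word "rooms" are dropped, punctuation is stripped, and verbs are lemmatized.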
|
kelvinchumbe/Hotel-Review-Mining-and-Web-App
|
Hotel Review Mining/Web App Deployment/api/preprocessing_utils.py
|
preprocessing_utils.py
|
py
| 4,650 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 78,
"usage_type": "call"
}
] |
71281284349
|
# Create a MySQL table and insert data
import pandas as pd
import pymysql
xl_file = '/Users/JaehoByun/JB/_School/2021_2 데이터사이언스/과제및시험/score.xlsx'
df = pd.read_excel(xl_file)
conn = pymysql.connect(host='localhost', user='root', password='chunjay606', db='data_science')
curs = conn.cursor(pymysql.cursors.DictCursor)
# Create the table
mk_table_sql = """create table if not exists score
(sno int primary key,
attendance float,
homework float,
discussion int,
midterm float,
final float,
score float,
grade varchar(3))"""
curs.execute(mk_table_sql)
# Insert the data
insert_sql = """insert into score(sno, attendance, homework, discussion, midterm, final, score, grade)
values (%s, %s, %s, %s, %s, %s, %s, %s)"""
for idx in range(len(df)):
curs.execute(insert_sql, tuple(df.values[idx]))
conn.commit()
# Verify the inserted data
show_table_sql = 'select * from score'
curs.execute(show_table_sql)
row = curs.fetchone()
while row:
print(row)
row = curs.fetchone()
curs.close()
conn.close()
|
bjho606/python_school-data-science
|
score_assignment2.py
|
score_assignment2.py
|
py
| 1,130 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_excel",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pymysql.connect",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors",
"line_number": 9,
"usage_type": "attribute"
}
] |
17635143913
|
from collections import Counter
import pandas as pd
def transform(new_subjects):
list_keys = list(Counter(new_subjects).keys())
list_values = list(Counter(new_subjects).values())
df_keys = pd.DataFrame(list_keys, columns=['subject'])
df_values = pd.DataFrame(list_values, columns=['frequency'])
df_arxiv = pd.concat([df_keys, df_values], axis=1)
df_arxiv['frequency'] = pd.to_numeric(df_arxiv['frequency'])
df_arxiv = df_arxiv.sort_values(by=['frequency'], ascending=False)
return df_arxiv
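# --- Usage sketch (illustrative subjects; not part of the module) ---
# transform(["cs.AI", "cs.AI", "math.CO"]) returns the subjects ranked by count:
#     subject  frequency
# 0     cs.AI          2
# 1   math.CO          1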
|
ThomasKranz/arxiv_ETL
|
src/transformer.py
|
transformer.py
|
py
| 526 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.Counter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
"line_number": 12,
"usage_type": "call"
}
] |
17650565567
|
import nltk
from newspaper import Article
# nltk.download('punkt') is a Python command that is used to download the "punkt" dataset or resource from the Natural Language Toolkit (NLTK) library.
# NLTK is a popular library in Python for working with human language data, including tasks like tokenization, parsing, and text classification.
# The "punkt" dataset in NLTK contains pre-trained models and data necessary for tokenization, which is the process of breaking down a text into individual words or tokens.
# These pre-trained models can be used to tokenize text in various languages, making it easier to work with natural language data in your Python projects.
nltk.download('punkt')
url='https://indianexpress.com/article/technology/tech-news-technology/apple-event-2-things-wowed-us-8938618/'
article = Article(url)
article.download()
article.parse()
article.nlp()
print(f'Title: {article.title}')
print(f'Authors: {article.authors}')
print(f'Publish Date: {article.publish_date}')
print(f'Summary: {article.summary}')
|
AnukulSri/summarize-news-article
|
news.py
|
news.py
|
py
| 1,033 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nltk.download",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "newspaper.Article",
"line_number": 12,
"usage_type": "call"
}
] |
32259974513
|
### SPDX-License-Identifier: GPL-2.0-or-later
"""Parse phc2sys log messages"""
import re
from collections import namedtuple
from .parser import (Parser, parse_decimal)
class TimeErrorParser(Parser):
"""Parse time error from a phc2sys log message"""
id_ = 'phc2sys/time-error'
elems = ('timestamp', 'terror', 'state', 'delay')
y_name = 'terror'
parsed = namedtuple('Parsed', elems)
@staticmethod
def build_regexp():
"""Return a regular expression string for parsing phc2sys log file lines"""
return r'\s'.join((r'^phc2sys'
+ r'\[([1-9][0-9]*\.[0-9]{3})\]:' # timestamp
+ r'(?:\s\[ptp4l\.\d\..*\])?', # configuration file name
r'CLOCK_REALTIME phc offset\s*',
r'(-?[0-9]+)', # time error
r'(\S+)', # state
r'freq\s*',
r'([-+]?[0-9]+)', # frequency error
r'delay\s*',
r'(-?[0-9]+)' # delay
+ r'\s*.*$'))
def __init__(self):
super().__init__()
self._regexp = re.compile(self.build_regexp())
def make_parsed(self, elems):
if len(elems) < len(self.elems):
raise ValueError(elems)
timestamp = parse_decimal(elems[0])
terror = int(elems[1])
state = str(elems[2])
delay = int(elems[3])
return self.parsed(timestamp, terror, state, delay)
def parse_line(self, line):
matched = self._regexp.match(line)
if matched:
return self.make_parsed((
matched.group(1),
matched.group(2),
matched.group(3),
matched.group(5),
))
return None
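# --- Usage sketch (illustrative; the log line below is an assumption) ---
# parser = TimeErrorParser()
# parser.parse_line(
#     'phc2sys[1234.567]: CLOCK_REALTIME phc offset        42 s2 freq   -100 delay   500')
# -> Parsed(timestamp=Decimal('1234.567'), terror=42, state='s2', delay=500),
# assuming parse_decimal yields a Decimal; the frequency field is matched but discarded.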
|
redhat-partner-solutions/vse-sync-pp
|
src/vse_sync_pp/parsers/phc2sys.py
|
phc2sys.py
|
py
| 1,846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "parser.Parser",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "parser.parse_decimal",
"line_number": 40,
"usage_type": "call"
}
] |
75275385466
|
from datetime import datetime
from time import process_time
# file = open(address, mode)
# with open('oi.txt', 'r', encoding='utf-8') as file:
# content = file.read()
# print(content)
with open('log.txt', 'w', encoding='utf-8') as file:
file.write('Employee log times')
# with open('log.txt', 'r', encoding='utf-8') as file:
# content = file.read()
# print(content)
status = False
tempo_trabalhado = 0
answer = input('Do you want to enter the system? ').lower()
if answer == 'yes':
status = True
t1 = process_time()
name = input('Enter your name: ').upper()
with open('log.txt', 'a', encoding='utf-8') as file:
date_now = datetime.now()
log = date_now.strftime('%d-%m-%Y %H:%M:%S')
file.write(f'\n{name} logged in at {log}')
if status:
answer = input('Do you want to leave the system? ').lower()
if answer == 'yes':
status = False
t2 = process_time()
tempo_trabalhado += (t2-t1)
with open('log.txt', 'a', encoding='utf-8') as file:
date_now = datetime.now()
log = date_now.strftime('%d-%m-%Y %H:%M:%S')
file.write(f'\n{name} logged out at {log}')
with open('log.txt', 'r', encoding='utf-8') as file:
content = file.read()
print(content)
print(tempo_trabalhado)
|
ewertonpereira/python
|
test/testing.py
|
testing.py
|
py
| 1,302 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "time.process_time",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "time.process_time",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "name"
}
] |
38435402089
|
import json
import pandas
def read_json(filename: str) -> dict:
try:
with open(filename, "r") as f:
data = json.loads(f.read())
except (OSError, json.JSONDecodeError):
raise Exception(f"Reading {filename} file encountered an error")
return data
def create_dataframe(data: dict) -> pandas.DataFrame:
# Declare an empty dataframe to append records
dataframe = pandas.DataFrame()
# Looping through each record
for d in data['workers']:
# Normalize the column levels
name_details = pandas.json_normalize(d, record_path=['nameDetails'])
contact_details = pandas.json_normalize(d, record_path=['phoneContactDetails'])
email = pandas.json_normalize(d, record_path=['emailContactDetails'],meta=[['employmentSummary','createAccessDate'],['employmentSummary','createAccessTime'],['employmentSummary','mostRecentHireDate']])
record = pandas.json_normalize(d, record_path=['addressDetails'])
job_details = pandas.json_normalize(d, record_path=['jobDetails'],meta=['workerIdentifier'])
new = pandas.concat([name_details,contact_details,email,record,job_details],axis=1,join='inner')
# Append it to the dataframe (DataFrame.append was removed in pandas 2.0)
dataframe = pandas.concat([dataframe, new], ignore_index=True)
return dataframe
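# --- Illustrative sketch of the record_path mechanics (toy data, assumed) ---
_toy_df = pandas.json_normalize({"nameDetails": [{"first": "Ada", "last": "Lovelace"}]},
                                record_path=["nameDetails"])
# _toy_df is a one-row frame with columns 'first' and 'last'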
def main():
# Read the JSON file as python dictionary
data = read_json(filename="work.json")
# Generate the dataframe for the array items in
# details key
dataframe = create_dataframe(data=data['workerDataResponse'])
# Renaming columns of the dataframe
dataframe.columns.to_list()
dataframe.rename(columns={
"employmentSummary.createAccessDate": "accessDate",
"employmentSummary.createAccessTime": "accessTime",
"employmentSummary.mostRecentHireDate": "mostRecentHireDate",
"employmentJobProfileDetails.jobProfileIdentifier": "jobProfileIdentifier",
"jobGovernanceRoleDetails.functionalManagerWorkerIdentifier": "functionalManagerWorkerIdentifier",
"organizationDetails.companyOrganizationIdentifier":"companyOrganizationIdentifier"
}, inplace=True)
dataframe.columns.to_list()
# Convert dataframe to CSV
dataframe.to_csv("emp_data.csv", index=False)
if __name__ == '__main__':
main()
|
PrasadWakle/jsontocsv
|
jsontocsv.py
|
jsontocsv.py
|
py
| 2,340 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.json_normalize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 16,
"usage_type": "attribute"
}
] |
11464353853
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 13:18:01 2022
@author: sampasmann
"""
import time
import numpy as np
from mpi4py import MPI
from src.functions.save_data import SaveData
from src.solvers.fixed_source.solvers import Picard
from src.solvers.eigenvalue.maps import MatVec_data, MatVec
from scipy.sparse.linalg import gmres, lgmres, bicgstab, LinearOperator
import scipy.linalg as sp
from src.solvers.eigenvalue.maps import SI_Map
# =============================================================================
# Iteration and Residual Storage for Krylov Solvers
# =============================================================================
class gmres_counter(object):
def __init__(self, disp=True):
self._disp = disp
self.iter = 0
self.callbacks = []
def __call__(self, rk=None):
self.callbacks.append(rk.copy())
self.iter += 1
if self._disp:
if (self.iter>1):
print(" Iteration:", self.iter-1, "change: ",
np.linalg.norm((rk - self.callbacks[self.iter-2])))
# =============================================================================
# Power Iteration
# =============================================================================
# TODO: Picard PI is not working
def PowerIteration(qmc_data, solver="LGMRES", max_outter_itt=10,
max_inner_itt=10, outter_tol=1e-5, inner_tol=1e-5,
report_progress=True):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# nproc = comm.Get_size()
itt = 0
k = qmc_data.keff
dk = 1.0
phi_old = qmc_data.tallies.phi_f.copy()
#res_hist = []
k_hist = []
if (rank==0):
print("")
print(" ██╗ ██████╗ ███╗ ███╗ ██████╗")
print(" ║ ║██╔═══██╗████╗ ████║██╔════╝")
print(" ██║██║ ██║██╔████╔██║██║ ")
print(" ██║██║▄▄ ██║██║╚██╔╝██║██║ ")
print(" ██║╚██████╔╝██║ ╚═╝ ██║╚██████╗")
print(" ╚═╝ ╚══▀▀═╝ ╚═╝ ╚═╝ ╚═════╝")
print("")
print("--------- K-Effective Eigenvalue Problem ---------")
print("Outter Solver: Power Iteration")
print("Inner Sovler:", solver)
print("Material: ", qmc_data.material_code)
print("Random Number Generator: ", qmc_data.generator)
print("Number of Particles per Iteration: ", qmc_data.N)
print("Number of Spatial Cells: ", qmc_data.Nx)
print("Initial K: ", qmc_data.keff)
# iterate over k effective
while (itt<=max_outter_itt) and (dk>=outter_tol):
# iterate over scattering source
phi_new = InnerIteration(qmc_data, solver=solver,
maxit=max_inner_itt,tol=inner_tol,
report_progress=report_progress)
#phi_hist.append(phi_new)
k_old = k
k = UpdateK(phi_old, phi_new, qmc_data)
k_hist.append(k)
qmc_data.keff = k
#res_hist.append(np.linalg.norm(phi_new-phi_old))
qmc_data.tallies.phi_f = phi_new.copy()
phi_old = phi_new.copy() # /norm(phi_new)
if (qmc_data.source_tilt):
qmc_data.tallies.dphi_f = qmc_data.tallies.dphi_s
dk = abs(k-k_old)
itt += 1
if (rank==0) and (report_progress):
print("**********************")
print("Iteration:", itt)
print("k: ", k)
print("dk: ",dk)
if (rank==0):
if (itt>=max_outter_itt):
print("Power Iteration convergence to tolerance not achieved: Maximum number of iterations.")
elif (dk<=outter_tol):
print("-------------------------------")
print("Successful Power Iteration convergence.")
return phi_new, k_hist, itt #, res_hist
# =============================================================================
# Inner Source Iteration for Power Iteration
# =============================================================================
# TODO: make exitCode an actual output from Picard
def InnerIteration(qmc_data,solver="LGMRES",tol=1e-5,maxit=50,save_data=False,
report_progress=True):
"""
Parameters
----------
qmc_data : TYPE
DESCRIPTION.
tol : TYPE, optional
DESCRIPTION. The default is 1e-5.
maxit : TYPE, optional
DESCRIPTION. The default is 50.
Returns
-------
phi : TYPE
DESCRIPTION.
"""
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
Nx = qmc_data.Nx
G = qmc_data.G
Nv = Nx*G
Nt = qmc_data.Nt
start = time.time()
matvec_data = MatVec_data(qmc_data)
if (qmc_data.source_tilt):
phi0 = np.append(qmc_data.tallies.phi_avg, qmc_data.tallies.dphi_s)
else:
phi0 = qmc_data.tallies.phi_avg
phi0 = np.reshape(phi0,(Nt,1))
if (rank==0) and (report_progress):
print(" Inner Iteration: ")
if (solver=="Picard"):
phi = Picard(qmc_data,tol=tol,maxit=maxit,save_data=False,
report_progress=report_progress)
exitCode = 0
else:
A = LinearOperator((Nt,Nt),
matvec=MatVec,
rmatvec=MatVec,
matmat= MatVec,
rmatmat=MatVec,
dtype=float)
b = matvec_data[0]
if (solver=="LGMRES"):
counter = gmres_counter(disp=report_progress)
gmres_out = lgmres(A,b,x0=phi0,tol=tol,maxiter=maxit, callback=counter)
elif (solver=="GMRES"):
counter = gmres_counter(disp=report_progress)
gmres_out = gmres(A,b,x0=phi0,tol=tol,maxiter=maxit, callback=counter)
elif (solver=="BICGSTAB"):
counter = gmres_counter(disp=report_progress)
gmres_out = bicgstab(A,b,x0=phi0,tol=tol,maxiter=maxit, callback=counter)
else:
print(" Not a valid solver ")
raise Exception(" Not a valid solver ")
phi = gmres_out[0]
exitCode = gmres_out[1]
stop = time.time()
run_time = stop - start
if (qmc_data.source_tilt):
phi = phi[:Nv]
phi = np.reshape(phi, (Nx,G))
if (rank==0):
if (save_data):
sim_data = SimData(phi, run_time, tol, nproc)
SaveData(qmc_data, sim_data)
if (exitCode>0) and (report_progress):
print(" Convergence to tolerance not achieved: Maximum number of iterations.")
elif (exitCode<0) and (report_progress):
print(" Illegal input or breakdown.")
elif (exitCode==0) and (report_progress):
print(" Successful convergence.")
return phi
def UpdateK(phi_f, phi_s, qmc_data):
keff = qmc_data.keff
material = qmc_data.material
keff *= (np.sum(material.nu*material.sigf*phi_s)
/np.sum(material.nu*material.sigf*phi_f))
return keff
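# Illustrative check (assumed numbers): if the fission-source integral
# sum(nu*sigf*phi) grows from 2.0 to 2.2 between iterates, UpdateK scales
# keff by 2.2/2.0 = 1.1.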
# =============================================================================
# Davidson's Algorithm
# =============================================================================
# TODO: Correct normalization of scalar flux in Davidson's output
# TODO: Enable Source Tilting with Davidson's
def Davidson(qmc_data, k0=1.0, l=1, m=None, numSweeps=8, tol=1e-6, maxit=30,
report_progress=True):
"""
Parameters
----------
qmc_data : qmc_data structure
k0 : Float, optional
DESCRIPTION. The default is 1.0.
l : Int, optional
DESCRIPTION. Number of eigenvalues and vectors to solve for. The default is 1.
m : Int, optional
DESCRIPTION. Restart parameter. The default is None (no restart).
numSweeps : Int, optional
DESCRIPTION. The default is 8.
tol : Float, optional
DESCRIPTION. The default is 1e-6.
maxit : Int, optional
DESCRIPTION. The default is 30.
Returns
-------
phi : ndarray
DESCRIPTION. Normalized scalar flux eigenvector.
keff : Float
DESCRIPTION. Converged k-effective (1/Lambda).
itt : Int
DESCRIPTION. Number of Davidson iterations performed.
"""
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Davidson Parameters
Nt = qmc_data.Nt
if (qmc_data.source_tilt):
phi0 = np.append(qmc_data.tallies.phi_avg, qmc_data.tallies.dphi_s)
else:
phi0 = qmc_data.tallies.phi_avg
phi0 = np.reshape(phi0,(Nt))
# u = qmc_data.tallies.phi_f.reshape(Nt)
V0 = np.array(phi0/np.linalg.norm(phi0).T) # orthonormalize initial guess
V = np.zeros((Nt,maxit))
axv = np.zeros((Nt,maxit))
bxv = np.zeros((Nt,maxit))
Vsize = 1
V[:,0] = V0
k_old = 0.0
dk = 1.0
itt = 1
if (rank==0):
print("")
print(" ██╗ ██████╗ ███╗ ███╗ ██████╗")
print(" ║ ║██╔═══██╗████╗ ████║██╔════╝")
print(" ██║██║ ██║██╔████╔██║██║ ")
print(" ██║██║▄▄ ██║██║╚██╔╝██║██║ ")
print(" ██║╚██████╔╝██║ ╚═╝ ██║╚██████╗")
print(" ╚═╝ ╚══▀▀═╝ ╚═╝ ╚═╝ ╚═════╝")
print("")
print("--------- K-Effective Eigenvalue Problem ---------")
print("Outter Solver: Davidson's Method")
print("Material: ", qmc_data.material_code)
print("Random Number Generator: ", qmc_data.generator)
print("Number of Particles per Iteration: ", qmc_data.N)
print("Number of Spatial Cells: ", qmc_data.Nx)
print("Initial K: ", qmc_data.keff)
if (m is None):
m = maxit+1 # unless specified there is no restart parameter
V[:,:Vsize] = PreConditioner(V[:,:Vsize], qmc_data, numSweeps)
# Davidson Routine
while (itt <= maxit) and (dk>=tol):
#print(V)
if (report_progress):
print("**********************")
print(" Davidson Iteration: ", itt)
axv[:,Vsize-1] = AxV(V[:,:Vsize], qmc_data)[:,0]
bxv[:,Vsize-1] = BxV(V[:,:Vsize], qmc_data)[:,0]
AV = np.dot(V[:,:Vsize].T, axv[:,:Vsize]) # Scattering linear operator
BV = np.dot(V[:,:Vsize].T, bxv[:,:Vsize]) # Fission linear operator
[Lambda, w] = sp.eig(AV, b=BV) # solve for eigenvalues and vectors
idx = Lambda.argsort() # get indices of eigenvalues from smallest to largest
Lambda = Lambda[idx] # sort eigenvalues from smallest to largest
assert not Lambda.imag.any() # there can't be any imaginary eigenvalues
Lambda = Lambda[:l].real # take the real component of the l smallest eigenvalues (largest k = 1/Lambda)
k = 1/Lambda
dk = abs(k - k_old)
if (report_progress):
print("K Effective: ", k)
print("dk: ",dk)
k_old = k
w = w[:,idx] # sort the corresponding eigenvectors
w = w[:,:l].real # take the eigenvectors for the l selected eigenvalues
u = np.dot(V[:,:Vsize],w) # Ritz vectors
res = AxV(u, qmc_data) - Lambda*BxV(u, qmc_data) # residual
t = PreConditioner(res, qmc_data, numSweeps)
if (Vsize <= m-l ):
Vsize += 1
V[:,:Vsize] = Gram(V[:,:Vsize-1],t) # appends new orthogonalization to V
else:
Vsize = 2
V[:,:Vsize] = Gram(u,t) # "restarts" by appending to a new array
if (itt==maxit):
print(" Convergence to tolerance not achieved: Maximum number of iterations.")
break
elif (dk < tol):
print(" Successful convergence.")
itt += 1
keff = 1/Lambda
phi = V[:,0]
phi = phi/np.linalg.norm(phi).T
return phi, keff, itt
# =============================================================================
# Functions for Davidson's Method
# =============================================================================
def AxV(V, qmc_data):
"""
Linear operator for scattering term (I-L^(-1)S)*phi
"""
v = V[:,-1]
Nx = qmc_data.Nx
G = qmc_data.G
Nt = qmc_data.Nt
zed = np.zeros((Nx,G))
phi_in = np.reshape(v, (Nt,1))
axv = (phi_in - SI_Map(zed, phi_in, qmc_data))
return axv
def BxV(V, qmc_data):
"""
Linear operator for fission term (L^(-1)F*phi)
"""
v = V[:,-1]
Nx = qmc_data.Nx
G = qmc_data.G
Nv = int(Nx*G)
Nt = qmc_data.Nt
zed = np.zeros(Nt)
phi_in = np.reshape(v, (Nt,1))
if (qmc_data.source_tilt):
dphi = qmc_data.tallies.dphi_s
qmc_data.tallies.dphi_s = zed
bxv = SI_Map(phi_in, zed, qmc_data)
if (qmc_data.source_tilt):
qmc_data.tallies.dphi_s = dphi
v[Nv:] = dphi.reshape(Nv)
return bxv
def PreConditioner(V, qmc_data, numSweeps=8):
"""
Linear operator approximation of L^(-1)S
In this case the preconditioner is a specified number of purely scattering
transport sweeps.
"""
v = V[:,-1]
Nx = qmc_data.Nx
G = qmc_data.G
Nt = qmc_data.Nt
Nv = Nx*G
zed = np.zeros((Nx,G))
phi_in = np.reshape(v, (Nt,1))
for i in range(numSweeps):
phi_in = SI_Map(zed, phi_in, qmc_data)
return phi_in
def Gram(V,u):
"""
Modified Gram Schmidt
"""
w1 = u - np.dot(V,np.dot(V.T,u))
v1 = w1 / np.linalg.norm(w1)
w2 = v1 - np.dot(V,np.dot(V.T,v1))
v2 = w2 / np.linalg.norm(w2)
V = np.append(V, v2, axis=1)
return V
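# A minimal sketch (not part of the original module) of the guarantee
# Gram provides: projecting twice ("twice is enough" reorthogonalization)
# keeps the expanded basis orthonormal even when u is nearly in span(V).
# Assumes V has orthonormal columns and u is a conforming column vector.
#
# V = np.linalg.qr(np.random.rand(8, 3))[0] # random orthonormal basis
# u = np.random.rand(8, 1)
# V = Gram(V, u)
# assert np.allclose(V.T @ V, np.eye(4), atol=1e-10)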
# =============================================================================
# Misc Functions
# =============================================================================
def SimData(phi, time, tol, nproc):
data = {
"phi": phi,
"run_time": time,
"tolerance": tol,
"nproc": nproc
}
return data
|
spasmann/iQMC
|
src/solvers/eigenvalue/solvers.py
|
solvers.py
|
py
| 14,940 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.linalg.norm",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "src.solvers.eigenvalue.maps.MatVec_data",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "src.solvers.fixed_source.solvers.Picard",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.LinearOperator",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "src.solvers.eigenvalue.maps.MatVec",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "src.solvers.eigenvalue.maps.MatVec",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "src.solvers.eigenvalue.maps.MatVec",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "src.solvers.eigenvalue.maps.MatVec",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.linalg.lgmres",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.gmres",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.bicgstab",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "src.functions.save_data.SaveData",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "numpy.append",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "scipy.linalg.eig",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "numpy.dot",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 314,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "src.solvers.eigenvalue.maps.SI_Map",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "src.solvers.eigenvalue.maps.SI_Map",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "src.solvers.eigenvalue.maps.SI_Map",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 388,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "numpy.append",
"line_number": 390,
"usage_type": "call"
}
] |
27315049620
|
import facebook
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import random
token='EAACEdEose0cBAHYBMbXyW9HwyJJIeCFBaWXEcLjsp3N0vB5HZApZCxqm7KQvVxb4fgF2ZA8nh625ZBJR3NzCMGc3ApU1MyZCYBwVF85LWxqdaEdt3cNVaS0y9CYsY4DDUjGcUeDZB0TMZBJwqdEBCZBClU00PeeMqnWmMpZCWCUFGmp12hZBZA3mLilYc450f4cWvkZD'
graph=facebook.GraphAPI(token)
profile=graph.get_object("me")
posts = graph.get_connections(profile['id'], 'posts')
messages=[]
for post in posts['data']:
try:
messages.append(post['message'])
except:
continue
wordlist=[]
wordfr=[]
s=" "
for m in messages:
words=m.split()
for w in words:
s=s+" "+w
wordlist.append(w)
print(w)
for w in wordlist:
wordfr.append(wordlist.count(w))
print("List\n" + str(wordlist) + "\n")
print("Frequencies\n" + str(wordfr) + "\n")
print("Pairs\n" + str(zip(wordlist, wordfr)))
wordcloud = WordCloud(relative_scaling = 1.0,stopwords = 'to of').generate(s)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
|
aparnamnn/ACM-Project
|
wordclouds.py
|
wordclouds.py
|
py
| 1,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "facebook.GraphAPI",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wordcloud.WordCloud",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
}
] |
35510723799
|
# Experiment 24 - Tile Movement
#
# By Chris Herborth (https://github.com/Taffer)
# MIT license, see LICENSE.md for details.
import base64
import os
import pygame
import pygame.freetype
import pygame.gfxdraw
import struct
import sys
import time
import zlib
from xml.etree import ElementTree
SCREEN_TITLE = 'Experiment 24 - Tile Movement'
SCREEN_WIDTH = 1280 # 720p screen
SCREEN_HEIGHT = 720
BLACK = pygame.Color('black')
RED = pygame.Color('red')
WHITE = pygame.Color('white')
# Tiled map parser.
class Map:
def __init__(self, map_path: str) -> None:
tree = ElementTree.parse(map_path)
self.root = tree.getroot()
layers = self.root.findall('layer')
# Map size in tiles.
self.map_width = int(self.root.attrib['width'])
self.map_height = int(self.root.attrib['height'])
# Tile size in pixels.
self.tile_width = int(self.root.attrib['tilewidth'])
self.tile_height = int(self.root.attrib['tileheight'])
# Tileset and image atlas paths are relative to the map file.
prefix = os.path.split(map_path)[0]
tilesets = self.root.findall('tileset')
self.tiles = [None] # Index 0 means "don't draw a tile" in Tiled.
for tileset in tilesets:
tileset_path = os.path.join(prefix, tileset.attrib['source'])
tileset_prefix = os.path.split(tileset_path)[0]
tileset_tree = ElementTree.parse(tileset_path)
tileset_root = tileset_tree.getroot()
image = tileset_root.find('image')
image_path = os.path.join(tileset_prefix, image.attrib['source'])
texture = pygame.image.load(image_path).convert_alpha()
texture_rect = texture.get_rect()
# Create subsurfaces for the tiles in the atlas.
for y in range(texture_rect.height // self.tile_height):
for x in range(texture_rect.width // self.tile_width):
tile_rect = pygame.Rect(x * self.tile_width, y * self.tile_height, self.tile_width, self.tile_height)
self.tiles.append(texture.subsurface(tile_rect))
self.layer_data = {}
for layer in layers:
# Decode the layer data. This map is using CSV, which is easy; for
# help decoding other formats, check out my tileset crusher's code:
# https://github.com/Taffer/crushtileset/
data = layer.find('data')
data_contents = data.text
this_data = []
if data.attrib['encoding'] == 'csv':
lines = data_contents.split()
for line in lines:
for c in line.split(','):
if c != '':
this_data.append(int(c))
elif data.attrib['encoding'] == 'base64' and data.attrib.get('compression', 'none') == 'zlib':
the_data = base64.b64decode(data_contents)
# CSV data is organized into rows, so we make this one big row.
this_data = [x[0] for x in struct.iter_unpack('<I', zlib.decompress(the_data))]
else:
raise RuntimeError('Unsupported encoding/compression.')
self.layer_data[layer.attrib['name']] = this_data
def render(self, layer: str, surface: pygame.Surface, viewport: pygame.Rect, offset_x: int, offset_y: int) -> None:
# This use case seems to be faster than using blits(); the overhead of
# creating a list of tuples is probably what kills it.
max_x = min(viewport.width, self.map_width)
max_y = min(viewport.height, self.map_height)
for y in range(max_y):
for x in range(max_x):
tile = self.tiles[self.layer_data[layer][self.get_index(x + viewport.x, y + viewport.y)]]
target = pygame.Rect(offset_x + x * self.tile_width, offset_y + y * self.tile_height,
self.tile_width, self.tile_height)
if tile is not None:
surface.blit(tile, target)
def get_index(self, x: int, y: int) -> int:
return x + y * self.map_width
def get_tile(self, layer: str, x: int, y: int) -> int:
return self.layer_data[layer][self.get_index(x, y)]
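# Example usage (a sketch mirroring what Demo.__init__/Demo.draw do below;
# assumes pygame and a display are already initialized so convert_alpha()
# works):
#
# screen = pygame.display.set_mode((1280, 720))
# tile_map = Map('resources/grass-map.tmx')
# viewport = pygame.Rect(0, 0, 1280 // tile_map.tile_width, 720 // tile_map.tile_height)
# tile_map.render('Tile Layer 1', screen, viewport, 0, 0)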
# LPC Sprite for animation.
#
# This sets up a set of sprites, quads, etc. using the standard Liberated
# Pixel Cup sprite format:
#
# https://lpc.opengameart.org/static/lpc-style-guide/styleguide.html
#
# Specifically:
# * Each row is a complete animation cycle.
# * Rows are mostly in groups of four based on facing = away, left, forward,
# right.
# * Animation rows are: Spellcast, Thrust, Walk, Slash, Shoot, Hurt (only one
# facing for Hurt). We fake an Idle animation by cloning the first frame of
# Walk.
# * Are 64x64 on the sprite sheet.
# Note that this includes a non-standard animation, 'idle', made up of the
# first 'walk' frame.
LPC_ANIMATION = [
'spellcast',
'thrust',
'walk',
'slash',
'shoot',
'hurt',
'idle'
]
LPC_FACING = [
'away',
'left',
'forward',
'right'
]
FRAMES = {
LPC_ANIMATION[0]: 7, # spellcast
LPC_ANIMATION[1]: 8, # thrust
LPC_ANIMATION[2]: 9, # walk
LPC_ANIMATION[3]: 6, # slash
LPC_ANIMATION[4]: 13, # shoot
LPC_ANIMATION[5]: 6, # hurt
LPC_ANIMATION[6]: 1, # idle
}
class LPCSprite:
def __init__(self: 'LPCSprite', texture: pygame.Surface) -> None:
self.width = 64
self.height = 64
self.feet_x = self.width // 2 # Where are the feet relative to 0,0?
self.feet_y = self.height - 2
self.facing = LPC_FACING[2] # Default facing and animation.
self.animation = LPC_ANIMATION[2]
self.frame = 1
self.texture = texture
# Generate subsurfaces.
self.frames = {}
y = 0
for av in LPC_ANIMATION[:-2]: # "hurt" and "idle" are special cases
self.frames[av] = {}
for fv in LPC_FACING:
self.frames[av][fv] = []
for i in range(FRAMES[av]):
x = i * self.width
rect = pygame.Rect(x, y, self.width, self.height)
self.frames[av][fv].append(texture.subsurface(rect))
y += self.height
# "hurt" has to be special-cased because it only has one facing.
self.frames['hurt'] = {}
y = texture.get_height() - self.height
for fv in LPC_FACING:
# We'll use this animation for all four facings.
self.frames['hurt'][fv] = []
for i in range(FRAMES['hurt']):
x = i * self.width
rect = pygame.Rect(x, y, self.width, self.height)
for fv in LPC_FACING:
self.frames['hurt'][fv].append(texture.subsurface(rect))
# "idle" is fake, just the first frame from "walk"
self.frames['idle'] = {}
for fv in LPC_FACING:
self.frames['idle'][fv] = [self.frames['walk'][fv][0]]
def check_frame(self: 'LPCSprite') -> None:
if self.frame >= FRAMES[self.animation]:
self.frame = 0
def next_frame(self: 'LPCSprite') -> None:
self.frame += 1
self.check_frame()
def set_facing(self: 'LPCSprite', facing: str) -> None:
self.facing = facing
self.check_frame()
def set_animation(self: 'LPCSprite', animation: str) -> None:
self.animation = animation
self.check_frame()
def get_texture(self: 'LPCSprite') -> pygame.Surface:
return self.frames[self.animation][self.facing][self.frame]
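# Example usage (a sketch; assumes an initialized display and the sprite
# sheet path used by Demo below):
#
# sheet = pygame.image.load('resources/LPC_Sara/SaraFullSheet.png').convert_alpha()
# sara = LPCSprite(sheet)
# sara.set_animation('walk')
# sara.set_facing('left')
# frame = sara.get_texture() # 64x64 subsurface for the current frame
# sara.next_frame() # advance and wrap within the 9-frame 'walk' cycle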
class StateMachine:
def __init__(self: 'StateMachine', initial_state: 'StateBase'):
self.current = initial_state
self.current.enter()
def change(self: 'StateMachine', new_state: 'StateBase'):
self.current.exit()
self.current = new_state
self.current.enter()
def update(self: 'StateMachine', dt: float):
next_state = self.current.update(dt)
if next_state != self.current:
self.change(next_state)
class StateBase:
def __init__(self: 'StateBase', entity: 'Entity'):
self.entity = entity
self.ticks = 0
def enter(self: 'StateBase'):
pass
def exit(self: 'StateBase'):
pass
def update(self: 'StateBase', dt: float):
pass
class WaitState(StateBase):
def __init__(self: 'WaitState', entity: 'Entity'):
super().__init__(entity)
def enter(self: 'WaitState'):
self.entity.sprite.set_animation('idle')
def exit(self: 'WaitState'):
pass
def update(self: 'WaitState', dt: float):
walk = None
self.ticks += dt
if self.ticks > 0.1:
self.ticks -= 0.1
keystate = pygame.key.get_pressed()
if keystate[pygame.K_w] or keystate[pygame.K_UP]:
walk = {'x': 0, 'y': -1} # go up
elif keystate[pygame.K_s] or keystate[pygame.K_DOWN]:
walk = {'x': 0, 'y': 1} # go down
elif keystate[pygame.K_a] or keystate[pygame.K_LEFT]:
walk = {'x': -1, 'y': 0} # go left
elif keystate[pygame.K_d] or keystate[pygame.K_RIGHT]:
walk = {'x': 1, 'y': 0} # go right
if walk is not None:
return WalkState(self.entity, walk)
return self
class WalkState(StateBase):
def __init__(self: 'WalkState', entity: 'Entity', direction: dict):
super().__init__(entity)
self.direction = direction
self.target_x = self.entity.x
self.target_y = self.entity.y
def enter(self: 'WalkState'):
self.entity.sprite.set_animation('walk')
if self.direction['y'] == -1: # go up
self.entity.sprite.set_facing('away')
self.target_y -= 1
elif self.direction['y'] == 1: # go down
self.entity.sprite.set_facing('forward')
self.target_y += 1
elif self.direction['x'] == -1: # go left
self.entity.sprite.set_facing('left')
self.target_x -= 1
elif self.direction['x'] == 1: # go right
self.entity.sprite.set_facing('right')
self.target_x += 1
# Clamp movement to the map.
if self.target_x < 0:
self.target_x = 0
elif self.target_x >= self.entity.map.map_width:
self.target_x = self.entity.x
if self.target_y < 0:
self.target_y = 0
elif self.target_y >= self.entity.map.map_height:
self.target_y = self.entity.y
def exit(self: 'WalkState'):
pass
def update(self: 'WalkState', dt: float):
if self.target_x == self.entity.x and self.target_y == self.entity.y:
return WaitState(self.entity)
# TODO: needs tweening
self.ticks += dt
if self.ticks > 0.1:
if self.direction['y'] == -1: # go up
self.entity.offset_y -= 1
elif self.direction['y'] == 1: # go down
self.entity.offset_y += 1
elif self.direction['x'] == -1: # go left
self.entity.offset_x -= 1
elif self.direction['x'] == 1: # go right
self.entity.offset_x += 1
self.entity.sprite.next_frame()
if abs(self.entity.offset_x) >= self.entity.map.tile_width or \
abs(self.entity.offset_y) >= self.entity.map.tile_height: # Done moving.
self.entity.teleport(self.target_x, self.target_y)
return WaitState(self.entity)
return self
class Entity:
def __init__(self: 'Entity', sprite: LPCSprite, entity_map: Map):
self.sprite = sprite
self.map = entity_map
self.x = 0
self.y = 0
self.offset_x = 0 # Drawing offsets for inter-tile animation.
self.offset_y = 0
self.controller = StateMachine(WaitState(self))
def teleport(self: 'Entity', x: int, y: int):
self.x = x
self.y = y
self.offset_x = 0
self.offset_y = 0
def draw(self: 'Entity', surface: pygame.Surface, x: int, y: int):
# Draw sprite's feet at screen co-ords x, y.
rect = pygame.Rect(x - self.sprite.width // 4, y - self.sprite.height // 2, self.sprite.width, self.sprite.height)
rect.x += self.offset_x
rect.y += self.offset_y
surface.blit(self.sprite.get_texture(), rect)
def draw_tile(self: 'Entity', surface: pygame.Surface, x: int, y: int, tile_width: int, tile_height: int):
# Draw the tile the sprite thinks it's in.
rect = pygame.Rect(x * tile_width, y * tile_height, tile_width, tile_height)
pygame.gfxdraw.rectangle(surface, rect, RED)
class Demo:
def __init__(self: 'Demo', screen: pygame.Surface) -> None:
self.screen = screen
self.font = pygame.freetype.Font('resources/LiberationMono-Bold.ttf', 16)
self.sara_texture = pygame.image.load('resources/LPC_Sara/SaraFullSheet.png').convert_alpha()
self.map = Map('resources/grass-map.tmx')
# Viewport rect is in *tile* co-ordinates.
self.viewport = pygame.Rect(0, 0, 1280 // self.map.tile_width, 720 // self.map.tile_height)
self.sara = Entity(LPCSprite(self.sara_texture), self.map)
self.sara.teleport(10, 10) # Tile co-ordinates.
self.ticks = 0
def draw(self: 'Demo') -> None:
self.screen.fill(BLACK)
self.map.render('Tile Layer 1', self.screen, self.viewport, 0, 0)
self.font.render_to(self.screen, (10, 10), 'Use WASD or arrow keys to walk.', WHITE)
# Draw a rectangle to show which tile has the sprite's feet.
self.sara.draw_tile(self.screen, self.sara.x, self.sara.y, self.map.tile_width, self.map.tile_height)
# Draw Sara - We want her feet to be in the tile. This would be easier
# if the sprite were the same size as our map tiles...
self.sara.draw(self.screen, self.sara.x * self.map.tile_width, self.sara.y * self.map.tile_height)
def update(self: 'Demo', dt: float) -> None:
self.sara.controller.update(dt)
def main() -> None:
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption(SCREEN_TITLE)
demo = Demo(screen)
now = time.time()
dt = 0
playing = True
while playing:
demo.draw()
pygame.display.flip()
dt = time.time() - now
now = time.time()
demo.update(dt)
for event in pygame.event.get():
if event.type == pygame.QUIT:
playing = False
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
playing = False
pygame.quit()
sys.exit()
if __name__ == '__main__':
main()
|
Taffer/pygame-experiments
|
24-tile-movement/main.py
|
main.py
|
py
| 14,875 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pygame.Color",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "struct.iter_unpack",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "zlib.decompress",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_w",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_s",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_a",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LEFT",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_d",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw.rectangle",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "pygame.gfxdraw",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "pygame.freetype.Font",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "pygame.freetype",
"line_number": 384,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 386,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 416,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 418,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 419,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "pygame.event.get",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 437,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 438,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYUP",
"line_number": 440,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 444,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 445,
"usage_type": "call"
}
] |
4159403258
|
import math
import random
import vector3
from rtweekend import random_double
import multiprocessing
from multiprocessing import Process, Array
from ctypes import c_char_p
from color import write_color
from vector3 import vec3, random_in_hemisphere
from ray import ray
import rtweekend
from hittable import hit_record
from hittable_list import hit_ls
from sphere import sphere
from camera import camera
from material import lambertian, metal, dielectric
#image width
def multi_render(return_string, id, fromI, toI, image_height, image_width ,samples_per_pixel, cam, world, max_depth):
for j in range(toI - 1, fromI - 1, -1): # each worker renders rows [fromI, toI) exactly once
for i in range(0,image_width):
pixel_color = vec3(0,0,0)
for _ in range(samples_per_pixel):
u = (i + random.random()) / (image_width-1)
v = (j + random.random()) / (image_height-1)
r = cam.get_ray(u, v)
pixel_color = pixel_color + ray_color(r, world, max_depth)
return_string[id] += write_color(pixel_color, samples_per_pixel)
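# A worked note (not in the original source): with the loop bounds above,
# worker i renders rows j in [int(i*H/n), int((i+1)*H/n)) from the top of
# its slice downward, so concatenating return_str[n-1], ..., return_str[0]
# in the main block yields exactly image_height PPM rows, top to bottom.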
def random_scene():
world = []
ground_material = lambertian(vec3(0.5, 0.5, 0.5))
world.append(sphere(vec3(0,-1000,0), 1000, ground_material))
for a in range(-11,11,1):
for b in range(-11,11,1):
choose_mat = random.random()
center = vec3(a + 0.9*random.random(), 0.2, b + 0.9*random.random())
if((center - vec3(4, 0.2, 0)).length() > 0.9):
if (choose_mat < 0.8):
#diffuse
albedo = vector3.random().mult( vector3.random())
sphere_material = lambertian(albedo)
world.append(sphere(center, 0.2, sphere_material))
elif (choose_mat < 0.95):
#metal
albedo = vector3.random(0.5, 1)
fuzz = random_double(0, 0.5)
sphere_material = metal(albedo, fuzz)
world.append(sphere(center, 0.2, sphere_material))
else:
#glass
sphere_material = dielectric(1.5)
world.append(sphere(center, 0.2, sphere_material))
material1 = dielectric(1.5)
world.append(sphere(vec3(0, 1, 0), 1.0, material1))
material2 = lambertian(vec3(0.4, 0.2, 0.1))
world.append(sphere(vec3(-4, 1, 0), 1.0, material2))
material3 = metal(vec3(0.7, 0.6, 0.5), 0.0)
world.append(sphere(vec3(4, 1, 0), 1.0, material3))
return world
def ray_color(r, world, depth):
rec = hit_record(vec3(0,0,0), vec3(0,0,0), None, 0.0, False)
if depth <= 0:
return vec3(0,0,0)
hit_anything, rec = hit_ls(world, r, 0.001, rtweekend.infinity, rec)
if hit_anything:
scat, scattered, attenuation = rec.mat_ptr.scatter(r,rec)
if scat:
return ray_color(scattered, world,depth-1).mult(attenuation)
return vec3(0,0,0)
unit_direction = r.get_direction().unit_vector()
t = 0.5 * (unit_direction.y() + 1.0)
return vec3(1,1,1)*(1-t) + vec3(0.5,0.7,1.0)*t
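# A worked note (not in the original source): the background above is the
# linear blend color = (1 - t)*(1, 1, 1) + t*(0.5, 0.7, 1.0) with
# t = 0.5*(unit_direction.y() + 1) in [0, 1], so rays pointing down fade
# to white and rays pointing up fade to sky blue.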
if __name__ == '__main__':
#Image
aspect_ratio = 3.0 / 2.0
image_width = 384 # optimised size for an 8-core CPU
image_height = int(image_width / aspect_ratio)
samples_per_pixel = 50
max_depth = 50
#World
world = random_scene()
# camera
lookfrom = vec3(13,2,3)
lookat = vec3(0,0,0)
vup = vec3(0,1,0)
dist_to_focus = 10.0
aperture = 0.1
cam = camera(lookfrom, lookat, vup, 20, aspect_ratio, aperture, dist_to_focus)
# render
result_string = ""
result_string += "P3 \n" + str(image_width) + ' ' + str(image_height) + "\n255\n"
number_of_cores = multiprocessing.cpu_count()
process = []
manager = multiprocessing.Manager()
return_str = manager.dict()
for i in range(number_of_cores):
return_str[i] = ''
process.append(Process(target = multi_render, args=(return_str,i,int(i*image_height/number_of_cores), int((i+1)*image_height/number_of_cores), image_height, image_width,samples_per_pixel, cam, world, max_depth),))
process[i].start()
for i in range(number_of_cores):
process[i].join()
for i in range(number_of_cores-1, -1, -1):
result_string += return_str[i]
with open('image.ppm', 'w') as f:
f.write(result_string)
|
mk2510/ray_tracing_project
|
raytracing_in_a_weekend/main.py
|
main.py
|
py
| 4,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "vector3.vec3",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "color.write_color",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "material.lambertian",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "vector3.random",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "material.lambertian",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "vector3.random",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "rtweekend.random_double",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "material.metal",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "material.dielectric",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "material.dielectric",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "material.lambertian",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "material.metal",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sphere.sphere",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "hittable.hit_record",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "hittable_list.hit_ls",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "rtweekend.infinity",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "vector3.vec3",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "vector3.vec3",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "camera.camera",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Manager",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 124,
"usage_type": "call"
}
] |
39485957424
|
"""Модуль базы данных хранящей пользователей и их историю"""
import datetime as dt
from typing import Optional
import enum
from functools import cached_property
import sqlalchemy as sa
from sqlalchemy import create_engine, select, ForeignKey
from sqlalchemy.orm import (
Session,
DeclarativeBase,
Mapped,
mapped_column,
sessionmaker,
relationship
)
from .. import settings
from ..server.auth import AuthMixin
engine = create_engine(settings.database_path)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
class Base(DeclarativeBase):
pass
class User(Base, AuthMixin):
__tablename__ = 'user'
id: Mapped[int] = mapped_column(primary_key=True)
account_name: Mapped[str]
password: Mapped[str]
has_entered: Mapped[Optional[bool]] = mapped_column(default=False)
histories: Mapped[list['History']] = relationship(
back_populates='user',
cascade='all, delete'
)
contacts: Mapped[list['Contact']] = relationship(
back_populates='user',
foreign_keys='Contact.user_id'
)
friends_with_us: Mapped[list['Contact']] = relationship(
back_populates='user',
foreign_keys='Contact.friend_id'
)
@property
def friends(self) -> list['User']:
session = Session.object_session(self)
subq = select(Contact).where(Contact.user_id == self.id).subquery()
stmt = select(User).join(subq, User.id == subq.c.friend_id)
return session.scalars(stmt).all()
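# A sketch of the SQL this roughly emits (assuming default table names;
# not part of the original source):
# SELECT "user".* FROM "user"
# JOIN (SELECT contact.* FROM contact WHERE contact.user_id = :id) AS subq
# ON "user".id = subq.friend_id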
@cached_property
def user_service(self):
from ..server.user_service import UserService
session = Session.object_session(self)
return UserService(session)
def _get_last_event_time(self, event: 'History.Event'):
session = Session.object_session(self)
stm = (
select(History.time)
.filter_by(user_id=self.id, event=event)
.order_by(History.time.desc())
)
result = session.scalars(stm).first()
return result
@property
def last_login(self):
return self._get_last_event_time(event=History.Event.login)
@property
def last_logout(self):
return self._get_last_event_time(event=History.Event.logout)
@property
def last_send_message(self):
return self._get_last_event_time(event=History.Event.user_send_message_to_server)
@property
def last_get_message(self):
return self._get_last_event_time(event=History.Event.user_get_message_from_server)
def __repr__(self):
return (
f'User(id={self.id}, account_name={self.account_name})'
)
def is_online(self):
if not self.has_entered:
return False
return True
def check_password(self, password: str):
return self.password == password
class History(Base):
__tablename__ = 'history'
class Event(str, enum.Enum):
login = 'login'
logout = 'logout'
user_send_message_to_server = 'user_send_message_to_server'
user_get_message_from_server = 'user_get_message_from_server'
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int | None] = mapped_column(ForeignKey('user.id'))
user: Mapped[User | None] = relationship(back_populates='histories')
event: Mapped[Event] = mapped_column(sa.Enum(Event))
time: Mapped[dt.datetime]
adress: Mapped[str | None]
class Contact(Base):
__tablename__ = 'contact'
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[int] = mapped_column(
ForeignKey('user.id')
)
user: Mapped[User] = relationship(
back_populates='contacts',
foreign_keys=[user_id]
)
friend_id: Mapped[int] = mapped_column(
ForeignKey('user.id')
)
friend: Mapped[User] = relationship(
back_populates='friends_with_us',
foreign_keys=[friend_id]
)
Base.metadata.create_all(bind=engine)
def create_test_data():
from ..server import test_data
with SessionLocal() as session:
creator = test_data.TestData(session)
creator.create_data_if_not_exist(session)
create_test_data()
|
DemidovEvg/async_chat
|
src/nano_async_chat/async_chat/server/db.py
|
db.py
|
py
| 4,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.DeclarativeBase",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "server.auth.AuthMixin",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session.object_session",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.select",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.select",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.select",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session.object_session",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "server.user_service.UserService",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "functools.cached_property",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session.object_session",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.select",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Enum",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Mapped",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "server.test_data.TestData",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "server.test_data",
"line_number": 146,
"usage_type": "name"
}
] |
8947958268
|
from django.db import models
import ast
class ListField(models.TextField):
__metaclass__ = models.SubfieldBase
description = "Stores a python list"
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
value = []
if isinstance(value, list):
return value
return ast.literal_eval(value)
def get_prep_value(self, value):
if value is None:
return value
return unicode(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
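# Example (a sketch, not in the original source): ListField round-trips a
# Python list through its text form. to_python(u"[1, 2, 3]") returns
# [1, 2, 3] via ast.literal_eval, and get_prep_value([1, 2, 3]) returns
# the unicode string u"[1, 2, 3]" that is stored in the text column.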
class Student(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
netID = models.CharField(max_length=8, unique=True)
# blocks = ListField('Busy blocks',blank=True)
def blocks(self):
blks = []
for course in self.course_set.all():
for blk in course.blocks:
blks.append(blk)
return blks
def __unicode__(self): # Python 3: def __str__(self):
return self.netID
class Instructor(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
netID = models.CharField(max_length=8, unique=True)
faculty = models.BooleanField(default=False)
def __unicode__(self): # Python 3: def __str__(self):
return self.netID
def billable(self):
if self.faculty:
return 0
my_courses = self.course_set.all()
hours = 0.0
for course in my_courses:
hours += len(course.blocks)/2.0
return hours
def full_name(self):
return self.first_name+" "+self.last_name
class Course(models.Model):
courseID = models.CharField(max_length=20, unique=True)
title = models.CharField(max_length=75,default='Title needed')
description = models.TextField(max_length=1000)
other_section = models.ManyToManyField('self', blank=True)
min_enroll = models.IntegerField(default=0)
max_enroll = models.IntegerField(default=200)
cancelled = models.BooleanField(default=False)
room = models.CharField(max_length=200,default='tbd')
blocks = ListField('Course blocks')
schedule = models.CharField(max_length=50)
students = models.ManyToManyField(Student, through='Registration')
instructors = models.ManyToManyField(Instructor)
def __unicode__(self): # Python 3: def __str__(self):
return self.courseID
def current_enroll(self):
return len(self.students.all())
def is_full(self):
num_enroll = self.current_enroll()
return num_enroll >= self.max_enroll
def meets_min_requirements(self):
num_enroll = self.current_enroll()
return num_enroll >= self.min_enroll
def get_instructors(self):
return ", ".join([i.full_name() for i in self.instructors.all()])
is_full.boolean = True
meets_min_requirements.boolean = True
class Registration(models.Model):
student = models.ForeignKey(Student)
course = models.ForeignKey(Course)
timestamp = models.DateTimeField('Registration timestamp',auto_now_add=True)
attendance_M = models.BooleanField(default=False)
attendance_Tu = models.BooleanField(default=False)
attendance_W = models.BooleanField(default=False)
attendance_Th = models.BooleanField(default=False)
attendance_F = models.BooleanField(default=False)
def __unicode__(self): # Python 3: def __str__(self):
return self.student.netID+"-"+self.course.courseID
|
epkugelmass/USG-srv-dev
|
tigerapps/wintersession/models.py
|
models.py
|
py
| 3,678 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "django.db.models.TextField",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.SubfieldBase",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "ast.literal_eval",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "django.db.models.ManyToManyField",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.db.models.ManyToManyField",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "django.db.models.ManyToManyField",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 110,
"usage_type": "name"
}
] |
70439517629
|
import pyautogui
import cv2 as cv
import numpy as np
import keyboard
import time
from math import sqrt
from PIL import ImageGrab
import win32api, win32con
# https://stackoverflow.com/questions/5906693/how-to-reduce-the-number-of-colors-in-an-image-with-opencv
def kmeans_color_quantization(image, clusters=8, rounds=1):
h, w = image.shape[:2]
samples = np.zeros([h*w,3], dtype=np.float32)
count = 0
for x in range(h):
for y in range(w):
samples[count] = image[x][y]
count += 1
compactness, labels, centers = cv.kmeans(samples,
clusters,
None,
(cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
rounds,
cv.KMEANS_RANDOM_CENTERS)
centers = np.uint8(centers)
res = centers[labels.flatten()]
return res.reshape((image.shape))
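# Example usage (a sketch; the file paths are hypothetical):
# img = cv.imread('input.png') # BGR image
# quantized = kmeans_color_quantization(img, clusters=8)
# cv.imwrite('quantized.png', quantized)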
class GarticBot:
def __init__(self, DEBUG=False):
self.debug = DEBUG
BOARD_ORIGIN = (692, 170)
BOARD_RESOLUTION = (962, 530)
PENCIL = (-150, 25)
PENCIL_SLIDER = (-147, 772)
PENCIL_SLIDER_MIN_RANGE = (790, 665)
PALLETE = (-100, 570)
# DRAWING_RESOLUTION = (120, 66)
# DRAWING_RESOLUTION = (150, 82)
DRAWING_RESOLUTION = (200, 110)
COLOR_VARIANCE = 128
WHITE_THRESHOLD = 55
CLICK_DELAY = 1e-10
CLICK_DELAY_INTERVAL = 5
pyautogui.PAUSE = CLICK_DELAY
def _getRelativePos(self, pos):
return (pos[0] + self.BOARD_ORIGIN[0], pos[1] + self.BOARD_ORIGIN[1])
def _downScale(self, image):
f1 = self.DRAWING_RESOLUTION[0] / image.shape[1]
f2 = self.DRAWING_RESOLUTION[1] / image.shape[0]
dim = (int(image.shape[1] * min(f1, f2)), int(image.shape[0] * min(f1, f2)))
resized = cv.resize(image, dim)
downscaled = kmeans_color_quantization(resized, clusters=self.COLOR_VARIANCE, rounds=1)
if self.debug:
cv.imshow("IMAGE", cv.resize(image, (600, int(image.shape[0]*600/image.shape[1])), interpolation=cv.INTER_AREA))
cv.waitKey(600)
cv.imshow("IMAGE", cv.resize(resized, (600, int(resized.shape[0]*600/resized.shape[1])), interpolation=cv.INTER_AREA))
cv.waitKey(600)
cv.imshow("IMAGE", cv.resize(downscaled, (600, int(downscaled.shape[0]*600/downscaled.shape[1])), interpolation=cv.INTER_AREA))
cv.waitKey(600)
cv.destroyAllWindows()
return downscaled
def _getColorClusters(self, image):
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
clusters = {}
for j in range(len(image)):
for i in range(len(image[0])):
color = f"{image[j][i][0]},{image[j][i][1]},{image[j][i][2]}"
if color in clusters:
clusters[color].append((i, j))
else:
clusters.update({color: [(i, j)]})
return clusters
def _equipPencil(self):
pyautogui.click(self._getRelativePos(self.PENCIL))
def _setColor(self, color):
pyautogui.click(self._getRelativePos(self.PALLETE))
time.sleep(0.1)
color = color.split(",")
keyboard.send('tab')
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.write(color[0])
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.write(color[1])
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.write(color[2])
time.sleep(0.01)
keyboard.send('enter')
time.sleep(0.1)
def _getClickPosition(self, pos):
upscale_factor_x = self.BOARD_RESOLUTION[0] / self.DRAWING_RESOLUTION[0]
upscale_factor_y = self.BOARD_RESOLUTION[1] / self.DRAWING_RESOLUTION[1]
pos = (int(pos[0]*upscale_factor_x), int(pos[1]*upscale_factor_y))
return pos
def _setPencilThickness(self, thickness):
pyautogui.moveTo(self._getRelativePos(self.PENCIL_SLIDER))
def draw(self, image):
print("DOWNSCALING")
downscaled = self._downScale(image)
clusters = self._getColorClusters(downscaled)
while True:
if keyboard.is_pressed('alt+s'):
print("STOPPING")
return
if keyboard.is_pressed('alt+q'):
quit()
if keyboard.is_pressed('alt+d'):
break
time.sleep(0.2)
print("DRAWING")
self._equipPencil()
for color in clusters:
channels = color.split(",")
dist = sqrt(pow(int(channels[0])-255, 2) + pow(int(channels[1])-255, 2) + pow(int(channels[2])-255, 2))
if dist < self.WHITE_THRESHOLD:
continue
print(f'Color: {color}')
self._setColor(color)
for i, pixel in enumerate(clusters[color]):
pos = self._getClickPosition(pixel)
pos = self._getRelativePos(pos)
win32api.mouse_event(win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE, int(pos[0]/win32api.GetSystemMetrics(0)*65535), int(pos[1]/win32api.GetSystemMetrics(1)*65535) ,0 ,0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
if i%self.CLICK_DELAY_INTERVAL==0: time.sleep(self.CLICK_DELAY)
if keyboard.is_pressed('alt+s'):
print("STOPED")
return
print("DONE")
def run(self):
while True:
if keyboard.is_pressed('alt+q'):
break
if keyboard.is_pressed('alt+c'):
image = np.array(ImageGrab.grabclipboard())[:,:,:3]
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
self.draw(image)
bot = GarticBot(DEBUG=True)
bot.run()
|
JirkaKlimes/gartic.io_bot
|
main.py
|
main.py
|
py
| 5,990 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.kmeans",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.KMEANS_RANDOM_CENTERS",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pyautogui.PAUSE",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "cv2.waitKey",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "cv2.waitKey",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "cv2.waitKey",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pyautogui.click",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "keyboard.send",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "keyboard.send",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "keyboard.send",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "keyboard.write",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "keyboard.send",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "keyboard.write",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "keyboard.send",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "keyboard.write",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "keyboard.send",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pyautogui.moveTo",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "keyboard.is_pressed",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "keyboard.is_pressed",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "keyboard.is_pressed",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "win32api.mouse_event",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "win32con.MOUSEEVENTF_MOVE",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "win32con.MOUSEEVENTF_ABSOLUTE",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "win32api.GetSystemMetrics",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "win32api.mouse_event",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "win32con.MOUSEEVENTF_LEFTDOWN",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "win32api.mouse_event",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "win32con.MOUSEEVENTF_LEFTUP",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "keyboard.is_pressed",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "keyboard.is_pressed",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "keyboard.is_pressed",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab.grabclipboard",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2RGB",
"line_number": 167,
"usage_type": "attribute"
}
] |
21253145382
|
from django.shortcuts import render
from django.views.generic import View #导入View
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from wanwenyc.settings import DJANGO_SERVER_YUMING,MEDIA_ROOT
from .models import RdmAutoStatic,RdmStatic,RdmConfig
# Create your views here.
# Automatically merge database records into task names, task details and issue details
def RdmAutoStaticRequest(request, rdmautostatic_id, trackback=None):
rdmautostatic = RdmAutoStatic.objects.get(id=int(rdmautostatic_id)) # fetch the record
people_name = rdmautostatic.people_name
start_date = str(rdmautostatic.start_date)
end_date = str(rdmautostatic.end_date)
print(people_name)
print(start_date)
print(end_date)
from django.db.models import Q
# use Q() to exclude rows with an empty day_task_name or a placeholder week_task_deck
mubiao_data_list = RdmStatic.objects.filter(~Q(day_task_name='[]')).\
filter(~Q(week_task_deck='<span style="margin-left: 19px;color: gray;">无</span>')).\
filter(people_name=people_name).filter(is_week=False).order_by('-id') # keep only valid daily records for this person, newest id first
all_task_name_list = []
all_task_desc_list = []
all_task_quse_list = []
for mubiao_data_one in mubiao_data_list:
day_date = mubiao_data_one.day_date
new_day_date_list = []
for one_char in day_date:
if one_char in "0123456789-":
new_day_date_list.append(one_char)
new_day_date = "".join(new_day_date_list)
# extract the date of each record
print("record date: %s" % new_day_date)
if start_date <= new_day_date and new_day_date<=end_date:
print("date within range: %s" % new_day_date)
# aggregate the records that fall within the range
# collect all task names
day_task_name = mubiao_data_one.day_task_name
print(day_task_name)
print(type(day_task_name))
day_task_name_list = eval(day_task_name) # eval() turns the list-like string into a real list (unsafe on untrusted input; ast.literal_eval is safer)
print("day_task_name_list:")
print(day_task_name_list)
print(type(day_task_name_list))
for day_task_name_one in day_task_name_list:
if day_task_name_one not in all_task_name_list:
all_task_name_list.append(day_task_name_one)
# collect all task details
day_task_desc = mubiao_data_one.day_task_desc
if day_task_desc not in all_task_desc_list:
all_task_desc_list.append(day_task_desc)
# collect all issue details
day_task_quse = mubiao_data_one.day_task_quse
if day_task_quse not in all_task_quse_list:
all_task_quse_list.append(day_task_quse)
print("所有任务名称:")
print(all_task_name_list)
print("所有任务详情:")
print(all_task_desc_list)
print("所有问题详情:")
print(all_task_quse_list)
rdmautostatic.all_task_name = all_task_name_list
rdmautostatic.all_task_desc = all_task_desc_list
rdmautostatic.all_task_quse = all_task_quse_list
rdmautostatic.save() #保存入库
print("重定向返回'/reportdatas/rdmautostatic/'")
return HttpResponseRedirect('/reportdatas/rdmautostatic/') #重定向到该页面
#根据数据库内容自动合并生成任务名称和任务详情及问题详情
def RdmConfigRequest(request, rdmconfig_id, trackback=None):
rdmconfig = RdmConfig.objects.get(id=int(rdmconfig_id)) # 获取用例
rdm_url = rdmconfig.rdm_url
rdm_account = rdmconfig.rdm_account
rdm_password = rdmconfig.rdm_password
recode_year = rdmconfig.recode_year
print("RDM网址:%s" % rdm_url)
print("RDM登录账号:%s" % rdm_account)
print("RDM登录密码:%s" % rdm_password)
print("RDM统计日志年限:%s" % recode_year)
from .autoStaticRDMTask import WebRemoteUphild
loginurl= rdm_url
loginaccount= rdm_account
loginpassword= rdm_password
predate = recode_year
print("开始执行异步函数")
wc = WebRemoteUphild(loginurl=loginurl,loginaccount=loginaccount,loginpassword=loginpassword,predate=predate)
wc.run()
print("函数开始运行完成后")
print("重定向返回'/reportdatas/rdmconfig/'")
return HttpResponseRedirect('/reportdatas/rdmconfig/') #重定向到该页面
|
wawj901124/shangbaogongju
|
apps/reportdatas/views.py
|
views.py
|
py
| 4,480 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.RdmAutoStatic.objects.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.RdmAutoStatic.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.RdmAutoStatic",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.RdmStatic.objects.filter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "models.RdmStatic.objects",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "models.RdmStatic",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.Q",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models.Q",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "models.RdmConfig.objects.get",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "models.RdmConfig.objects",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "models.RdmConfig",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "autoStaticRDMTask.WebRemoteUphild",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 104,
"usage_type": "call"
}
] |
39795452637
|
# coding=utf-8
import requests
import re
import execjs
import json
from bs4 import BeautifulSoup
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
sendAddress = ''
emailPsw = ''
receiveAddress = ''
username = ''
psw = ''
def loadConfig():
with open('config.json', 'r', encoding='utf-8') as f:
config = f.read()
configJson = json.loads(config)
print(configJson)
global sendAddress, emailPsw, receiveAddress, username, psw
sendAddress = configJson['sendAddress']
emailPsw = configJson['emailPsw']
receiveAddress = configJson['receiveAddress']
username = configJson['username']
psw = configJson['psw']
def sendEmail(msgJson):
try:
stuName = msgJson['data']['owner']['name']
info = stuName + '\nYour check-in succeeded'
except:
info = 'Check-in failed\nDetails: ' + str(msgJson)
msg = MIMEText(info, 'plain', 'utf-8') # email body
msg['From'] = formataddr(["XMU Health Check-in", sendAddress]) # sender display name, sender address
msg['To'] = formataddr([receiveAddress, receiveAddress]) # recipient display name, recipient address
msg['Subject'] = "XMU Health Check-in" # email subject
server = smtplib.SMTP_SSL("smtp.qq.com", 465) # the sender's SMTP server
server.login(sendAddress, emailPsw) # sender account and its SMTP authorization code
server.sendmail(sendAddress, [receiveAddress, ], msg.as_string()) # sender, recipients, message
server.quit() # close the connection
def encrypt(pwd, key):
"""
Call the JS encryption routine
:param pwd:
:param key:
:return:
"""
with open('encrypt.js', 'r', encoding='utf-8') as f:
j = f.read()
js = execjs.compile(j)
return js.call('encryptAES', pwd, key)
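# Usage sketch: the salt is scraped from the login page's hidden
# 'pwdDefaultEncryptSalt' input (see __main__ below), so a call looks like
# encryptPsw = encrypt(psw, pwdDefaultEncryptSalt)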
def getDataFrame(session, headers, businessId):
"""
Build the JSON payload skeleton for the check-in POST
:param session:
:param headers:
:param businessId:
:return: the skeleton
"""
authorityMap = {'readonly': {'hide': 'true', "readonly": 'false'}, 'hide': {"hide": 'true', "readonly": 'false'},
'required': {'hide': 'true', "readonly": 'false'},
'optional': {'hide': 'true', "readonly": 'false'}}
list = []
dataFrameJson = session.get(
'https://xmuxg.xmu.edu.cn/api/formEngine/business/' + str(businessId) + '/formRenderData?playerId=owner',
headers=headers).json()['data']['components']
for data in dataFrameJson:
tempDict = {}
tempDict.update({"name": data['name']})
tempDict.update({"title": data['title']})
tempDict.update({'value': {}})
tempDict.update(authorityMap[data['properties']['authority']])
list.append(tempDict)
return {"formData": list, "playerId": "owner"}
def injectPersonalData(formDataJson, personalDataList):
"""
Inject the personal data into formData
and mark the form as checked in
:param formDataJson:
:param personalDataList:
:return: the skeleton with values injected
"""
dataMap = {} # build a title -> value mapping
for personalData in personalDataList:
valueData = {}
# address field
if (personalData['value']['dataType'] == "ADDRESS_VALUE"):
valueData.update({'addressValue': personalData['value']['addressValue']})
# plain string field
elif (personalData['value']['dataType'] == "STRING"):
valueData.update({'stringValue': personalData['value']['stringValue']})
# date field
elif (personalData['value']['dataType'] == "DATE"):
valueData.update({'dateValue': personalData['value']['dateValue']})
dataMap.update({personalData['title']: valueData})
# mark as already checked in (the title strings below must match the server form verbatim, so they are left untranslated)
title1 = 'Can you hereby declare that all the information provided is all true and accurate and there is no concealment, false information or omission. 本人是否承诺所填报的全部内容均属实、准确,不存在任何隐瞒和不实的情况,更无遗漏之处。'
dataMap[title1]['stringValue'] = "是 Yes"
title2 = '学生本人是否填写'
dataMap[title2]['stringValue'] = '是'
# inject the values into the list
list = formDataJson['formData'] # (shadows the builtin name, kept from the original)
for i in range(0, len(list)):
# inject only if we have a value for this field
if (dataMap.__contains__(list[i]['title'])):
list[i]['value'] = dataMap[list[i]['title']]
return {"formData": list, "playerId": "owner"}
if __name__ == '__main__':
# load the config
loadConfig()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
}
s = requests.session()
response = s.get('https://ids.xmu.edu.cn/authserver/login?service=https://xmuxg.xmu.edu.cn/login/cas/xmu')
HTML = BeautifulSoup(response.text, 'html.parser')
pwdDefaultEncryptSalt = HTML.find_all('input', attrs={'id': 'pwdDefaultEncryptSalt'})[0].attrs['value']
lt = HTML.find_all('input', attrs={'name': 'lt'})[0].attrs['value']
dllt = HTML.find_all('input', attrs={'name': 'dllt'})[0].attrs['value']
execution = HTML.find_all('input', attrs={'name': 'execution'})[0].attrs['value']
_eventId = HTML.find_all('input', attrs={'name': '_eventId'})[0].attrs['value']
rmShown = HTML.find_all('input', attrs={'name': 'rmShown'})[0].attrs['value']
encryptPsw = encrypt(psw, pwdDefaultEncryptSalt)
body = {'username': username,
'password': encryptPsw,
'lt': lt,
'dllt': dllt,
'execution': execution,
'_eventId': _eventId,
'rmShown': rmShown}
s.post('https://ids.xmu.edu.cn/authserver/login?service=https://xmuxg.xmu.edu.cn/login/cas/xmu', data=body,
headers=headers)
r1 = s.get('https://xmuxg.xmu.edu.cn/api/app/214/business/now?getFirst=true', headers=headers)
print(r1.text)
businessId = r1.json()['data'][0]['business']['id']
# fetch the form skeleton
formDataJson = getDataFrame(s, headers, businessId)
# fetch the personal data
r2Json = s.get(
'https://xmuxg.xmu.edu.cn/api/formEngine/business/' + str(businessId) + '/myFormInstance').json()
# inject the personal data
formData = injectPersonalData(formDataJson, r2Json['data']['formData'])
# URL for the check-in POST
instanceId = r2Json['data']['id']
form_url = f'https://xmuxg.xmu.edu.cn/api/formEngine/formInstance/{instanceId}'
# submit the check-in
resp = s.post(form_url, json=formData, headers=headers)
sendEmail(resp.json())
|
mawangdan/XMUDaliyReport
|
src/main.py
|
main.py
|
py
| 6,736 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "email.utils.formataddr",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "email.utils.formataddr",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "smtplib.SMTP_SSL",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "execjs.compile",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "requests.session",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 134,
"usage_type": "call"
}
] |
17977750270
|
import asyncio
import pickle
import unittest
from typing import AbstractSet, Any, Mapping, Sequence, Union
from testing.types import (
Digits,
I32List,
Integers,
SetI32,
StringBucket,
StrStrMap,
easy,
hard,
)
from thrift.py3.common import Protocol
from thrift.py3.exceptions import Error
from thrift.py3.serializer import (
Transform,
deserialize,
deserialize_from_header,
deserialize_with_length,
serialize,
serialize_iobuf,
serialize_with_header,
serialize_with_header_iobuf,
)
from thrift.py3.types import Struct
class SerializerTests(unittest.TestCase):
def test_with_header_bytes(self) -> None:
control = easy(val=5, val_list=[4, 3, 2, 1])
buf = serialize_with_header(control, transform=Transform.ZSTD_TRANSFORM)
decoded = deserialize_from_header(easy, buf)
self.assertEqual(control, decoded)
def test_with_header_iobuf(self) -> None:
control = easy(val=5, val_list=[4, 3, 2, 1])
iobuf = serialize_with_header_iobuf(control, transform=Transform.ZSTD_TRANSFORM)
decoded = deserialize_from_header(easy, iobuf)
self.assertEqual(control, decoded)
def test_with_header_iobuf_binary(self) -> None:
control = easy(val=6, val_list=[5, 4, 3, 2, 1])
iobuf = serialize_with_header_iobuf(
control, protocol=Protocol.BINARY, transform=Transform.ZLIB_TRANSFORM
)
decoded = deserialize_from_header(easy, iobuf)
self.assertEqual(control, decoded)
def test_with_header_iobuf_json(self) -> None:
control = easy(val=4, val_list=[3, 2, 1])
iobuf = serialize_with_header_iobuf(control, protocol=Protocol.JSON)
decoded = deserialize_from_header(easy, iobuf)
self.assertEqual(control, decoded)
def test_None(self) -> None:
with self.assertRaises(TypeError):
serialize(None, Protocol.JSON) # type: ignore
def test_sanity(self) -> None:
with self.assertRaises(TypeError):
serialize(1, Protocol.COMPACT) # type: ignore
with self.assertRaises(TypeError):
serialize(easy(), None) # type: ignore
with self.assertRaises(TypeError):
deserialize(Protocol, b"") # type: ignore
with self.assertRaises(TypeError):
deserialize(easy, Protocol) # type: ignore
def test_from_thread_pool(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4])
loop = asyncio.get_event_loop()
coro = loop.run_in_executor(None, serialize, control)
encoded = loop.run_until_complete(coro)
coro = loop.run_in_executor(None, deserialize, type(control), encoded)
decoded = loop.run_until_complete(coro)
self.assertEqual(control, decoded)
def test_serialize_iobuf(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4, 5])
iobuf = serialize_iobuf(control)
decoded = deserialize(type(control), iobuf)
self.assertEqual(control, decoded)
def test_bad_deserialize(self) -> None:
with self.assertRaises(Error):
deserialize(easy, b"", protocol=Protocol.JSON)
with self.assertRaises(Error):
deserialize(easy, b"\x05AAAAAAAA")
with self.assertRaises(Error):
deserialize(easy, b"\x02\xDE\xAD\xBE\xEF", protocol=Protocol.BINARY)
def thrift_serialization_round_robin(
self, control: Struct, fixtures: Mapping[Protocol, bytes]
) -> None:
for proto in Protocol:
encoded = serialize(control, protocol=proto)
self.assertIsInstance(encoded, bytes)
decoded = deserialize(type(control), encoded, protocol=proto)
self.assertIsInstance(decoded, type(control))
self.assertEqual(control, decoded)
self.assertEqual((proto, encoded), (proto, fixtures.get(proto)))
def pickle_round_robin(
self,
# pyre-fixme[2]: Parameter annotation cannot contain `Any`.
control: Union[Struct, Mapping[Any, Any], Sequence[Any], AbstractSet[Any]],
) -> None:
encoded = pickle.dumps(control, protocol=pickle.HIGHEST_PROTOCOL)
decoded = pickle.loads(encoded)
self.assertIsInstance(decoded, type(control))
self.assertEqual(control, decoded)
def test_serialize_easy_struct(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4])
fixtures: Mapping[Protocol, bytes] = {
Protocol.COMPACT: b"\x15\n\x19E\x02\x04\x06\x08,\x00\x00",
Protocol.BINARY: b"\x08\x00\x01\x00\x00\x00\x05\x0f\x00\x02\x08\x00\x00\x00"
b"\x04\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00"
b"\x00\x00\x04\x0c\x00\x04\x00\x00",
Protocol.JSON: b'{"val":5,"val_list":[1,2,3,4],"an_int":{}}',
Protocol.COMPACT_JSON: b'{"1":{"i32":5},"2":{"lst":["i32",4,1,2,3,4]},"4"'
b':{"rec":{}}}',
}
self.thrift_serialization_round_robin(control, fixtures)
def test_pickle_easy_struct(self) -> None:
control = easy(val=0, val_list=[5, 6, 7])
self.pickle_round_robin(control)
def test_serialize_hard_struct(self) -> None:
control = hard(
val=0, val_list=[1, 2, 3, 4], name="foo", an_int=Integers(tiny=1)
)
fixtures: Mapping[Protocol, bytes] = {
Protocol.COMPACT: b"\x15\x00\x19E\x02\x04\x06\x08\x18\x03foo\x1c\x13\x01"
b"\x00\x18\x0csome default\x00",
Protocol.BINARY: b"\x08\x00\x01\x00\x00\x00\x00\x0f\x00\x02\x08\x00\x00\x00"
b"\x04\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00"
b"\x00\x00\x04\x0b\x00\x03\x00\x00\x00\x03foo\x0c\x00\x04"
b"\x03\x00\x01\x01\x00\x0b\x00\x05\x00\x00\x00\x0csome def"
b"ault\x00",
Protocol.JSON: b'{"val":0,"val_list":[1,2,3,4],"name":"foo","an_int":{"tiny'
b'":1},"other":"some default"}',
Protocol.COMPACT_JSON: b'{"1":{"i32":0},"2":{"lst":["i32",4,1,2,3,4]},"3":'
b'{"str":"foo"},"4":{"rec":{"1":{"i8":1}}},"5":{"str":"some default"}}',
}
self.thrift_serialization_round_robin(control, fixtures)
def test_pickle_hard_struct(self) -> None:
control = hard(
val=0, val_list=[1, 2, 3, 4], name="foo", an_int=Integers(tiny=1)
)
self.pickle_round_robin(control)
def test_serialize_Integers_union(self) -> None:
control = Integers(medium=1337)
fixtures: Mapping[Protocol, bytes] = {
Protocol.COMPACT: b"5\xf2\x14\x00",
Protocol.BINARY: b"\x08\x00\x03\x00\x00\x059\x00",
Protocol.JSON: b'{"medium":1337}',
Protocol.COMPACT_JSON: b'{"3":{"i32":1337}}',
}
self.thrift_serialization_round_robin(control, fixtures)
def test_pickle_Integers_union(self) -> None:
control = Integers(large=2 ** 32)
self.pickle_round_robin(control)
def test_pickle_sequence(self) -> None:
control = I32List([1, 2, 3, 4])
self.pickle_round_robin(control)
digits = Digits(data=[Integers(tiny=1), Integers(tiny=2), Integers(large=0)])
data = digits.data
assert data
self.pickle_round_robin(data)
def test_pickle_set(self) -> None:
control = SetI32({1, 2, 3, 4})
self.pickle_round_robin(control)
def test_pickle_mapping(self) -> None:
control = StrStrMap({"test": "test", "foo": "bar"})
self.pickle_round_robin(control)
def test_deserialize_with_length(self) -> None:
control = easy(val=5, val_list=[1, 2, 3, 4, 5])
for proto in Protocol:
encoded = serialize(control, protocol=proto)
decoded, length = deserialize_with_length(
type(control), encoded, protocol=proto
)
self.assertIsInstance(decoded, type(control))
self.assertEqual(decoded, control)
self.assertEqual(length, len(encoded))
def test_string_with_non_utf8_data(self) -> None:
encoded = b"\x0b\x00\x01\x00\x00\x00\x03foo\x00"
sb = deserialize(StringBucket, encoded, protocol=Protocol.BINARY)
self.assertEqual("foo", sb.one)
encoded = b"\x0b\x00\x01\x00\x00\x00\x03\xfa\xf0\xef\x00"
sb = deserialize(StringBucket, encoded, protocol=Protocol.BINARY)
with self.assertRaises(UnicodeDecodeError):
# Accessing the property is when the string is decoded as UTF-8.
sb.one
|
WeilerWebServices/Facebook
|
fbthrift/thrift/lib/py3/test/serializer.py
|
serializer.py
|
py
| 8,534 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "testing.types.easy",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.serialize_with_header",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.Transform.ZSTD_TRANSFORM",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.serializer.Transform",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.deserialize_from_header",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "testing.types.easy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.serialize_with_header_iobuf",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.Transform.ZSTD_TRANSFORM",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.serializer.Transform",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.deserialize_from_header",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 41,
"usage_type": "argument"
},
{
"api_name": "testing.types.easy",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.serialize_with_header_iobuf",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.Transform.ZLIB_TRANSFORM",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.serializer.Transform",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.deserialize_from_header",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "testing.types.easy",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.serialize_with_header_iobuf",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "thrift.py3.common.Protocol.JSON",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.deserialize_from_header",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.serializer.serialize",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "thrift.py3.common.Protocol.JSON",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.serialize",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.serialize",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 70,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "testing.types.easy",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.serialize",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "testing.types.easy",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.serialize_iobuf",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "thrift.py3.exceptions.Error",
"line_number": 91,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.common.Protocol.JSON",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "thrift.py3.exceptions.Error",
"line_number": 93,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 94,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.exceptions.Error",
"line_number": 95,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 96,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "thrift.py3.types.Struct",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "typing.Mapping",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.serialize",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "thrift.py3.types.Struct",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.Mapping",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "typing.AbstractSet",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "pickle.dumps",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pickle.HIGHEST_PROTOCOL",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pickle.loads",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "typing.Mapping",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.JSON",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT_JSON",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "testing.types.easy",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "testing.types.hard",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "testing.types.Integers",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "typing.Mapping",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.JSON",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT_JSON",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "testing.types.hard",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "testing.types.Integers",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "testing.types.Integers",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "typing.Mapping",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.JSON",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "thrift.py3.common.Protocol.COMPACT_JSON",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "testing.types.Integers",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "testing.types.I32List",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "testing.types.Digits",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "testing.types.Integers",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "testing.types.SetI32",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "testing.types.StrStrMap",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "testing.types.easy",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.serialize",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.deserialize_with_length",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "testing.types.StringBucket",
"line_number": 206,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "thrift.py3.serializer.deserialize",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "testing.types.StringBucket",
"line_number": 210,
"usage_type": "argument"
},
{
"api_name": "thrift.py3.common.Protocol.BINARY",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "thrift.py3.common.Protocol",
"line_number": 210,
"usage_type": "name"
}
] |
31132813401
|
from abc import ABC
from collections import OrderedDict, defaultdict
import torch
import torch.nn.functional as F
from torch import flatten
from torch.nn import Module, Conv2d, Dropout, Linear, BatchNorm2d, ReLU, Sequential, MaxPool2d
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LRScheduler
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
class AbstractModule(Module, ABC): # TODO check that it's abstract
def __init__(self):
super().__init__()
self._optim = None
self._criterion = None
self._scheduler = None
self._pruner = None
def optimizer(self, optim: type[Optimizer], **kwargs):
self._optim = optim(self.parameters(), **kwargs)
return self
def scheduler(self, scheduler: type[LRScheduler], **kwargs):
self._scheduler = scheduler(self._optim, **kwargs)
return self
def criterion(self, criterion: Module):
self._criterion = criterion
return self
def fit(self,
dataloader: DataLoader,
epochs: int,
callbacks=None
) -> None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = self.to(device).train()
for epoch in range(1, epochs + 1):
loader_bar = tqdm(dataloader, desc='train', leave=False)
for inputs, targets in loader_bar:
inputs = inputs.to(device)
targets = targets.to(device)
# Reset the gradients (from the last iteration)
self._optim.zero_grad()
outputs = model(inputs)
loss = self._criterion(outputs, targets)
loss.backward()
self._optim.step()
if callbacks is not None:
for callback in callbacks:
callback()
loader_bar.set_description(f"Epoch [{epoch}/{epochs}]")
if self._scheduler is not None:
self._scheduler.step()
@torch.inference_mode()
def evaluate(self,
dataloader: DataLoader,
verbose=True,
) -> float:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = self.to(device).eval()
num_samples = 0
num_correct = 0
for inputs, targets in tqdm(dataloader, desc="eval", leave=False, disable=not verbose):
inputs = inputs.to(device)
targets = targets.to(device)
outputs = model(inputs)
outputs = outputs.argmax(dim=1)
# Update metrics
num_samples += targets.size(0)
num_correct += (outputs == targets).sum()
return (num_correct / num_samples * 100).item()
class BaseLineNet(AbstractModule):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(1, 32, 3, 1) # 1 x 32 x 3 x 3 = 288 parameters
self.conv2 = Conv2d(32, 64, 3, 1) # 32 x 64 x 3 x 3=18,432 parameters
self.dropout1 = Dropout(0.25)
self.dropout2 = Dropout(0.5)
self.fc1 = Linear(9216, 128) # 9216 x 128 = 1,179,648 parameters
self.fc2 = Linear(128, 10) # 128 x 10 = 1,280 parameters
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
class VGG(AbstractModule):
ARCH = [64, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
def __init__(self) -> None:
super().__init__()
layers = []
counts = defaultdict(int)
def add(name: str, layer: Module) -> None:
layers.append((f"{name}{counts[name]}", layer))
counts[name] += 1
in_channels = 3
for x in self.ARCH:
if x != 'M':
# conv-bn-relu
add("conv", Conv2d(in_channels, x, 3, padding=1, bias=False))
add("bn", BatchNorm2d(x))
add("relu", ReLU(True))
in_channels = x
else:
add("pool", MaxPool2d(2))
self.backbone = Sequential(OrderedDict(layers))
self.classifier = Linear(512, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# backbone: [N, 3, 32, 32] => [N, 512, 2, 2]
x = self.backbone(x)
# avgpool: [N, 512, 2, 2] => [N, 512]
x = x.mean([2, 3])
# classifier: [N, 512] => [N, 10]
x = self.classifier(x)
return x
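# A minimal usage sketch (commented out; hypothetical names -- assumes
# `train_loader`/`test_loader` DataLoaders exist and CrossEntropyLoss fits the task):
# model = (VGG()
#          .optimizer(torch.optim.SGD, lr=0.1, momentum=0.9)
#          .scheduler(torch.optim.lr_scheduler.CosineAnnealingLR, T_max=20)
#          .criterion(torch.nn.CrossEntropyLoss()))
# model.fit(train_loader, epochs=20)
# print(model.evaluate(test_loader))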
|
bnwiran/tinyml-benchmark
|
models/models.py
|
models.py
|
py
| 4,749 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.optim.Optimizer",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "torch.optim.lr_scheduler.LRScheduler",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "torch.nn.Module",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.inference_mode",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torch.flatten",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.nn.Linear",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 146,
"usage_type": "attribute"
}
] |
16586269759
|
from flask import Blueprint, render_template
from app.models import Post
home = Blueprint('home', __name__)
@home.route('/')
def index():
posts = Post.query.filter_by(published=True).all()
return render_template('home/index.html', posts=posts)
|
rg3915/flask-masterclass
|
app/blueprints/home_blueprint.py
|
home_blueprint.py
|
py
| 256 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "app.models.Post.query.filter_by",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.models.Post.query",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "app.models.Post",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 11,
"usage_type": "call"
}
] |
9411671299
|
from django.urls import path
from . import views as blog_views
# import users.views as user_views
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
# using a variable in the route (for individual posts, which will be numbered)
# the detail view is expecting the "pk" variable (we could change this in the class)
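# (hypothetical illustration: setting pk_url_kwarg = 'post_id' on PostDetailView
# would let this route be written as 'post/<int:post_id>/' instead)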
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete', PostDeleteView.as_view(), name='post-delete'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('about/', blog_views.about, name='blog-about'),
]
|
Coniferish/djangoTutorial
|
blog/urls.py
|
urls.py
|
py
| 935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.PostListView.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.PostListView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.UserPostListView.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.UserPostListView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.PostDetailView.as_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.PostDetailView",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "views.PostUpdateView.as_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "views.PostUpdateView",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "views.PostDeleteView.as_view",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "views.PostDeleteView",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "views.PostCreateView.as_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "views.PostCreateView",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
}
] |
30085050335
|
from django.shortcuts import render
from django.http import JsonResponse
from category.models import Category
# Create your views here.
def jsons(data = None, errorCode = 0, cookies = ''):
if data is None:
data = []
return JsonResponse({'errorCode': errorCode, 'data': data, 'cookies': cookies})
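# e.g. jsons([{'id': 1}]) responds with {"errorCode": 0, "data": [{"id": 1}], "cookies": ""}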
def categoryGetAll(request):
categories = Category.objects.all()
return jsons([dict(category.body()) for category in categories])
|
jeremyytann/BUAA-SE-LetStudy
|
Code/backend/category/views.py
|
views.py
|
py
| 456 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.http.JsonResponse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "category.models.Category.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "category.models.Category.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "category.models.Category",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "category.models.body",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "category.models",
"line_number": 14,
"usage_type": "name"
}
] |
21424331672
|
'''
1. Single-threaded parser.
2. Timing measurement
3. Multiprocessing Pool
4. Timing measurement
5. Export to CSV
'''
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from multiprocessing import Pool
import csv
import time
def get_html(url):
r = requests.get(url) # Response
return r.text # returns the HTML of the page at `url`
def get_all_links(html):
counter = 0
soup = BeautifulSoup(html, 'lxml')
tags_div = soup.find('div').find_all('div', class_="cmc-table__column-name sc-1kxikfi-0 eTVhdN")
links = []
for td in tags_div:
a = td.find('a').get('href') #string
link = "https://coinmarketcap.com" + a
links.append(link)
return links
def get_page_data(html):
soup = BeautifulSoup(html, 'lxml')
try:
name = soup.find("h1").text.strip()
except:
name = ""
try:
price = soup.find("span", class_="cmc-details-panel-price__price").text.strip()
except:
price = ""
data = {'name': name, 'price': price}
return data
def write_csv(data):
with open('coinmarketcap.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow((data['name'],
data['price']))
print(data['name'], 'parsed')
def make_all(url):
html = get_html(url)
data = get_page_data(html)
write_csv(data)
# time.sleep(5)
def main():
start = time.time()
url = "https://coinmarketcap.com/all/views/all/"
all_links = get_all_links(get_html(url))
with Pool(40) as p:
p.map(make_all, all_links)
end = time.time()
total = end - start
print(str(total))
if __name__ == "__main__":
main()
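# Note: Pool(40) starts 40 worker processes; on Windows (spawn start method)
# make_all must remain a top-level picklable function, which is why the pool
# is created under the __main__ guard.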
|
DexterAkaGrich/potential-couscous
|
first_meet.py
|
first_meet.py
|
py
| 1,849 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
}
] |
29579809040
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 11:04:13 2018
@author: Akitaka
"""
# 1: Import libraries --------------------------------
import numpy as np # numpy, for arrays and matrices
import pandas as pd # pandas, for data analysis
import matplotlib.pyplot as plt # plotting library
from sklearn import linear_model, metrics, preprocessing, cross_validation # machine-learning utilities (cross_validation was removed in sklearn 0.20; model_selection replaces it)
from mlxtend.plotting import plot_decision_regions # external library for plotting decision regions
from sklearn.kernel_approximation import RBFSampler # kernel-approximation helper
from matplotlib.colors import ListedColormap # for plotting
# 2: Create XOR data: (x>0, y>0) -> 0, (x>0, y<0) -> 1, etc. --------------
np.random.seed(0)
X_xor=np.random.randn(200,2)
y_xor=np.logical_xor(X_xor[:,0]>0, X_xor[:,1]>0)
y_xor=np.where(y_xor,1,0)
pd.DataFrame(y_xor) # run this line to inspect the data
# 3: Plot the data ------------------------------------------------------
#%matplotlib inline
plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1], c='b', marker='x', label='1')
plt.scatter(X_xor[y_xor==0, 0], X_xor[y_xor==0, 1], c='r', marker='s', label='0')
plt.legend(loc='best')
plt.show()
# 4: Reshape the data -------------------------------------------------------
X_std=X_xor
z=y_xor
# 5: Apply the kernel approximation ------------------------------------------
rbf_feature = RBFSampler(gamma=1, n_components=100, random_state=1)
X_std = rbf_feature.fit_transform(X_std)
print("X_stdの大きさ ",pd.DataFrame(X_std).shape)
#pd.DataFrame(X_std).to_clipboard() #これでクリップボードに保持できるのでエクセルに貼れる
# 6:機械学習で分類する---------------------------------------------------
clf_result=linear_model.SGDClassifier(loss="hinge") #loss="hinge", loss="log"
# 7: Evaluate performance with k-fold cross validation ---------------------
scores=cross_validation.cross_val_score(clf_result, X_std, z, cv=10)
print("mean accuracy = ", scores.mean())
print("accuracy standard deviation = ", scores.std())
# 8: Split into training and test data and run ------------------
X_train, X_test, train_label, test_label=cross_validation.train_test_split(X_std,z, test_size=0.1, random_state=1)
clf_result.fit(X_train, train_label)
# compute the accuracy
pre=clf_result.predict(X_test)
ac_score=metrics.accuracy_score(test_label,pre)
print("正答率 = ",ac_score)
# 解説 9:Plotする
x1_min, x1_max, x2_min, x2_max=-3, 3, -3, 3
resolution=0.02
xx1, xx2=np.meshgrid(np.arange(x1_min, x1_max, resolution),np.arange(x2_min, x2_max, resolution))
X=(np.array([xx1.ravel(), xx2.ravel()]).T)
plot_z=clf_result.predict(rbf_feature.fit_transform(X))
colors=('red','blue')
cmap=ListedColormap(colors[:len(np.unique(plot_z))])
plot_z=plot_z.reshape(xx1.shape)
plt.contourf(xx1,xx2, plot_z, alpha=0.4, cmap=cmap)
|
nakanishi-akitaka/python2018_backup
|
1207/ml2b.py
|
ml2b.py
|
py
| 3,157 |
python
|
ja
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "numpy.random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.logical_xor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "sklearn.kernel_approximation.RBFSampler",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.SGDClassifier",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "sklearn.cross_validation.cross_val_score",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "sklearn.cross_validation.train_test_split",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sklearn.cross_validation",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "numpy.meshgrid",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.ListedColormap",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.contourf",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
}
] |
8629709747
|
from flask import Flask,request
app = Flask(__name__)
@app.route('/')
def home():
return "Bem-Vindo"
@app.route('/calculo')
def add():
a = 10
b = 10
return str(a+b)
if __name__ == '__main__':
app.run()
|
kaibernu/MLDeploy
|
API.py
|
API.py
|
py
| 231 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
}
] |
39290687517
|
#!/usr/bin/env python2
from __future__ import print_function
from Bio import SeqIO
import sys, vcf, getopt
__author__ = 'Kumar'
sample_number = 0
vcf_file = ''
a = 0
x = 0
n = 0
position = 0
fold = 0
try:
myopts, args = getopt.getopt(sys.argv[1:],"f:s:")
for o, a in myopts:
if o == '-f':
vcf_file = str(a)
elif o == '-s':
sample_number = int(a)
except getopt.GetoptError as e:
print(str(e))
print("Usage:: %s -f <vcf_file> -s <sample index in case of multi-sample vcf file>" % sys.argv[0])
sys.exit(2)
vcf_reader = vcf.Reader(open(vcf_file, 'r'))
sf = open("outfile.sf", "w")
for record in vcf_reader:
#print(record.samples)
position = record.POS
ad = record.samples[sample_number]['AD']
#print(ad)
if ad is None:
continue
else:
a = ad[0]
x = ad[1]
#print("%s::::%s"% (a,x))
n = a + x
if a > x:
fold = 0
else:
fold = 1
header = "location\tx\tn\tfolded\n"
if sf.tell() == 0:
sf.write(header)
sf.write("%d\t%d\t%d\t%d\n"% (position, x, n, fold))
else:
sf.write("%d\t%d\t%d\t%d\n"% (position, x, n, fold))
sf.close()
|
kumarsaurabh20/NGShelper
|
PopulationGenomics/vcf2sf.py
|
vcf2sf.py
|
py
| 1,109 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "getopt.getopt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "getopt.GetoptError",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vcf.Reader",
"line_number": 29,
"usage_type": "call"
}
] |
71361812987
|
import sys
import mysql.connector
from awsglue.utils import getResolvedOptions
params = [
'db_host',
'db_port',
'db_user',
'db_password',
'db_database',
'ticket_id_to_be_updated'
]
args = getResolvedOptions(sys.argv, params)
cnx = mysql.connector.connect(
host=args['db_host'],
port=args['db_port'],
user=args['db_user'],
password=args['db_password'],
database=args['db_database']
)
cur = cnx.cursor()
def update_data(cursor, connection):
ticket_id = args['ticket_id_to_be_updated']
print("Selecting one record from table {}".format("customer"))
cursor.execute("SELECT customer_id FROM customer ORDER BY RAND() LIMIT 1")
rows = cursor.fetchall()
customer_id = ""
for row in rows:
customer_id = row[0]
update_event = ("UPDATE ticket_activity SET purchased_by={}, updated_at=now() WHERE ticket_id={}".format(customer_id, ticket_id))
cursor.execute(update_event)
connection.commit()
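# Note: interpolating values with str.format (as above) is SQL-injection-prone
# if ticket_id ever comes from untrusted input. A safer sketch with
# mysql.connector's parameterized queries (same behavior for numeric ids):
# cursor.execute(
#     "UPDATE ticket_activity SET purchased_by=%s, updated_at=now() WHERE ticket_id=%s",
#     (customer_id, ticket_id),
# )
# connection.commit()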
def read_data(cursor):
cursor.execute("SELECT * FROM ticket_activity")
rows = cursor.fetchall()
for row in rows:
print(row)
read_data(cur)
update_data(cur, cnx)
read_data(cur)
cur.close()
cnx.close()
|
bhavik161/studio
|
rds/rds_upsert_data.py
|
rds_upsert_data.py
|
py
| 1,200 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "awsglue.utils.getResolvedOptions",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 16,
"usage_type": "name"
}
] |
22020962951
|
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import rich
import seaborn as sns
import typer
from boiling_learning.app.configuration import configure
from boiling_learning.app.datasets.bridged.boiling1d import DEFAULT_BOILING_OUTLIER_FILTER
from boiling_learning.app.datasets.preprocessed.boiling1d import boiling_datasets
from boiling_learning.app.paths import studies_path
from boiling_learning.app.training.boiling1d import DEFAULT_BOILING_HEAT_FLUX_TARGET
from boiling_learning.datasets.sliceable import targets
from boiling_learning.image_datasets import ImageDatasetTriplet
from boiling_learning.lazy import LazyDescribed
from boiling_learning.utils.pathutils import resolve
app = typer.Typer()
console = rich.console.Console()
@app.command()
def boiling1d() -> None:
configure(
force_gpu_allow_growth=True,
use_xla=True,
require_gpu=True,
)
datasets = boiling_datasets(direct_visualization=True)
f, axes = plt.subplots(len(datasets), 1, figsize=(6, 4))
for index, (ax, dataset) in enumerate(zip(axes, datasets)):
data = _sorted_boiling_datasets(dataset)
sns.scatterplot(
ax=ax,
data=data,
x='index',
y='heat flux',
hue='class',
alpha=0.5,
)
ax.set_title(f'Dataset {index}')
f.savefig(str(_data_split_study_path() / 'boiling1d.pdf'))
@app.command()
def condensation(
each: int = typer.Option(60),
normalize: bool = typer.Option(...),
) -> None:
raise NotImplementedError
def _sorted_boiling_datasets(datasets: LazyDescribed[ImageDatasetTriplet]) -> pd.DataFrame:
ds_train, ds_val, ds_test = datasets()
df = pd.DataFrame(
sorted(
(
(
target['nominal_power'],
target[DEFAULT_BOILING_HEAT_FLUX_TARGET],
target['elapsed_time'],
class_name,
)
for class_name, ds in (
('train', ds_train),
('val', ds_val),
('test', ds_test),
)
for target in targets(ds).prefetch(1024)
if DEFAULT_BOILING_OUTLIER_FILTER()(None, target)
),
key=lambda power_hf_et_class: (
power_hf_et_class[0],
power_hf_et_class[2],
),
),
columns=['nominal power', 'heat flux', 'elapsed time', 'class'],
)
df['index'] = range(len(df))
return df
def _data_split_study_path() -> Path:
return resolve(studies_path() / 'data-split', dir=True)
|
ruancomelli/boiling-learning
|
boiling_learning/app/studies/data_split.py
|
data_split.py
|
py
| 2,677 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "typer.Typer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rich.console.Console",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rich.console",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "boiling_learning.app.configuration.configure",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "boiling_learning.app.datasets.preprocessed.boiling1d.boiling_datasets",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "seaborn.scatterplot",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "typer.Option",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "typer.Option",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "boiling_learning.lazy.LazyDescribed",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "boiling_learning.image_datasets.ImageDatasetTriplet",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "boiling_learning.app.training.boiling1d.DEFAULT_BOILING_HEAT_FLUX_TARGET",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "boiling_learning.datasets.sliceable.targets",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "boiling_learning.app.datasets.bridged.boiling1d.DEFAULT_BOILING_OUTLIER_FILTER",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "boiling_learning.utils.pathutils.resolve",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "boiling_learning.app.paths.studies_path",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 88,
"usage_type": "name"
}
] |
22400150737
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QModelIndex, QItemSelectionModel
from diz import *
import sys
from BD import Orm
from dialog import Dialog
from dizain1_2 import TwoWindow
from dialog2 import Dialog2
bd = Orm()
class InputDialog(QtWidgets.QDialog):
def __init__(self, root, **kwargs):
super().__init__(root, **kwargs)
self.win = root
label = QtWidgets.QLabel('Введите название')
self.edit = QtWidgets.QLineEdit()
button = QtWidgets.QPushButton('Найти')
button.clicked.connect(self.push)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(label)
layout.addWidget(self.edit)
layout.addWidget(button)
self.setLayout(layout)
def push(self):
if self.edit.text():
r = bd.search_mater(self.edit.text())
if r:
self.win.now(r)
self.close()
self.win.hid()
else:
msg = QMessageBox()
msg.setWindowTitle("Ошибка")
msg.setText("Не найдено ")
msg.addButton('Ок', QMessageBox.RejectRole)
msg.exec()
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
        # headers for the table columns
self.ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self.ui.pushButton.clicked.connect(self.addfac)
self.ui.pushButton_2.clicked.connect(self.addmat)
self.ui.pushButton_4.clicked.connect(self.search)
self.ui.pushButton_5.hide()
self.ui.pushButton_5.clicked.connect(self.tomain)
self.now(bd.allmat())
self.ui.pushButton_3.clicked.connect(self.delmat)
self.id=False
def now(self, data):
if data:
self.ui.tableWidget.setEnabled(True)
self.ui.pushButton_3.setEnabled(True)
self.ui.pushButton_4.setEnabled(True)
            # rows and columns
self.ui.tableWidget.setRowCount(
len(data)
)
self.ui.tableWidget.setColumnCount(
len(data[0])
)
self.ui.tableWidget.setHorizontalHeaderLabels(
('Id', 'Название материала', 'Фирма', 'Магазин', 'Поставщик',
'Наличие счета', 'Наличие НДС', 'Количество', 'Цена')
)
row = 0
for tup in data:
col = 0
for item in tup:
cellinfo = QTableWidgetItem(str(item))
cellinfo.setFlags(
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
)
self.ui.tableWidget.setItem(row, col, cellinfo)
# self.ui.tableWidget.horizontalHeader().setSectionResizeMode(col , QHeaderView.Stretch)
col += 1
row += 1
self.ui.tableWidget.resizeColumnsToContents()
self.ui.tableWidget.horizontalHeader().setSectionResizeMode(col - 1, QHeaderView.Stretch)
else:
self.ui.tableWidget.clear()
self.ui.tableWidget.setEnabled(False)
self.ui.pushButton_3.setEnabled(False)
self.ui.pushButton_4.setEnabled(False)
def addmat(self):
        self.dialog = Dialog()
        self.dialog.exec()
self.now(bd.allmat())
def addfac(self):
if not self.id:
self.now(bd.allmat())
msg = QMessageBox()
msg.setWindowTitle("Ошибка")
msg.setText("Вы не выбрали не один договор")
msg.addButton('Ок', QMessageBox.RejectRole)
msg.exec()
else:
print(self.id)
self.now(bd.allmat())
            self.dialog2 = Dialog2(self.id)
            self.dialog2.exec()
self.now(bd.allmat())
def delmat(self):
if not self.id:
self.now(bd.allmat())
msg = QMessageBox()
msg.setWindowTitle("Ошибка")
msg.setText("Вы не выбрали не один договор")
msg.addButton('Ок', QMessageBox.RejectRole)
msg.exec()
else:
print(self.id)
bd.delmat(self.id)
self.now(bd.allmat())
@pyqtSlot(QModelIndex)
    def on_tableWidget_clicked(self, index: QModelIndex):  # get the row index of the clicked cell
self.id = int(self.ui.tableWidget.item(index.row(), 0).text())
print(self.id)
@pyqtSlot(QModelIndex)
    def on_tableWidget_doubleClicked(self, index: QModelIndex):  # open the object list for the selected row
r = int(self.ui.tableWidget.item(index.row(), 0).text())
data = bd.allfac(r)
if not data:
msg = QMessageBox()
msg.setWindowTitle("Ошибка")
msg.setText("Нет записей объекта")
msg.addButton('Ок', QMessageBox.RejectRole)
msg.exec()
else:
self.twow = TwoWindow(r)
self.twow.show()
self.twow.now(data)
    def search(self):
        # keep the dialog under a separate name so this method is not shadowed
        self.search_dialog = InputDialog(self)
        self.search_dialog.exec()
def hid(self):
self.ui.pushButton_5.show()
self.ui.pushButton_4.hide()
def tomain(self):
self.now(bd.allmat())
self.ui.pushButton_5.hide()
self.ui.pushButton_4.show()
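# --- Hedged sketch (not part of the original code) ---
# The same error dialog is constructed four times above; a module-level
# helper would remove the duplication:
def show_error(text, title="Ошибка"):
    msg = QMessageBox()
    msg.setWindowTitle(title)
    msg.setText(text)
    msg.addButton('Ок', QMessageBox.RejectRole)
    msg.exec()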
app = QtWidgets.QApplication([])
win = MainWindow()
# win.now(data)
win.show()
sys.exit(app.exec())
|
Vorlogg/BD
|
dizain.py
|
dizain.py
|
py
| 5,829 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "BD.Orm",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dialog.Dialog",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "dialog2.Dialog2",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QModelIndex",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QModelIndex",
"line_number": 137,
"usage_type": "argument"
},
{
"api_name": "PyQt5.QtCore.QModelIndex",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "dizain1_2.TwoWindow",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSlot",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QModelIndex",
"line_number": 142,
"usage_type": "argument"
},
{
"api_name": "sys.exit",
"line_number": 177,
"usage_type": "call"
}
] |
71365190588
|
import torch
from torchvision import transforms
from torch.autograd import Variable
from dataset import DatasetFromFolder
from model import Generator
import utils
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=False, default='facades', help='input dataset')
parser.add_argument('--direction', required=False, default='BtoA', help='input and target image order')
parser.add_argument('--batch_size', type=int, default=1, help='test batch size')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--input_size', type=int, default=1024, help='input size')
params = parser.parse_args()
print(params)
# Directories for loading data and saving results
data_dir = '../Data/' + params.dataset + '/'
save_dir = params.dataset + '_test_results/'
model_dir = params.dataset + '_model/'
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if not os.path.exists(model_dir):
os.mkdir(model_dir)
# Data pre-processing
# Note: transforms.Scale was renamed to transforms.Resize in later torchvision
# releases; Scale is kept here for compatibility with the old API.
test_transform = transforms.Compose([transforms.Scale(params.input_size),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
# Test data
test_data = DatasetFromFolder(data_dir, subfolder='test', direction=params.direction, transform=test_transform)
test_data_loader = torch.utils.data.DataLoader(dataset=test_data,
batch_size=params.batch_size,
shuffle=False)
# Load model
G = Generator(3, params.ngf, 3)
G.cuda()
G.load_state_dict(torch.load(model_dir + 'generator_param.pkl'))
# Test
for i, (input, target) in enumerate(test_data_loader):
# input & target image data
x_ = Variable(input.cuda())
y_ = Variable(target.cuda())
gen_image = G(x_)
gen_image = gen_image.cpu().data
# Show result for test data
utils.plot_test_result(input, target, gen_image, i, training=False, save=True, save_dir=save_dir)
print('%d images are generated.' % (i + 1))
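# --- Hedged sketch (optional addition) ---
# torchvision.utils.save_image can also dump the raw generator output;
# undo the (-1, 1) normalization first:
#     from torchvision.utils import save_image
#     save_image((gen_image + 1) / 2, save_dir + 'gen_%03d.png' % i)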
|
togheppi/pix2pix
|
pix2pix_test.py
|
pix2pix_test.py
|
py
| 2,081 |
python
|
en
|
code
| 46 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Scale",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "dataset.DatasetFromFolder",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "model.Generator",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "utils.plot_test_result",
"line_number": 55,
"usage_type": "call"
}
] |
40319402697
|
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api import \
Session
from ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls import GeneralModule
class General(GeneralModule):
CMDS = {
'set': 'set',
'search': 'get',
}
API_KEY_PATH = 'bgp'
API_MOD = 'quagga'
API_CONT = 'bgp'
API_CONT_REL = 'service'
API_CMD_REL = 'reconfigure'
FIELDS_CHANGE = [
'as_number', 'id', 'graceful', 'enabled', 'networks',
'redistribute',
]
FIELDS_ALL = FIELDS_CHANGE
FIELDS_TRANSLATE = {
'as_number': 'asnumber',
'id': 'routerid',
}
FIELDS_TYPING = {
'bool': ['enabled', 'graceful'],
'list': ['networks', 'redistribute'],
}
INT_VALIDATIONS = {
'as_number': {'min': 1, 'max': 4294967295},
}
def __init__(self, module: AnsibleModule, result: dict, session: Session = None):
GeneralModule.__init__(self=self, m=module, r=result, s=session)
|
ansibleguy/collection_opnsense
|
plugins/module_utils/main/frr_bgp_general.py
|
frr_bgp_general.py
|
py
| 1,066 |
python
|
en
|
code
| 158 |
github-code
|
6
|
[
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls.GeneralModule",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ansible.module_utils.basic.AnsibleModule",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.api.Session",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls.GeneralModule.__init__",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ansible_collections.ansibleguy.opnsense.plugins.module_utils.base.cls.GeneralModule",
"line_number": 36,
"usage_type": "name"
}
] |
20861131743
|
import requests
import os
import wget
import subprocess
def update_mindustry():
global be_wrapper
global current_build
download_url = "https://github.com/Anuken/MindustryBuilds/releases/download/" + str(current_build)
download_url = download_url + "/Mindustry-BE-Desktop-" + str(current_build) + ".jar"
os.system("rm -f " + os.path.join(be_wrapper, "Mindustry.jar"))
wget.download(download_url, os.path.join(be_wrapper, "Mindustry.jar"))
    with open(be_wrapper + "/last.txt", "w") as bfile:
        bfile.write(str(current_build))
print()
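# --- Hedged alternative (sketch) ---
# The wget dependency could be dropped by streaming the jar with requests,
# which this script already imports:
#     def download_jar(url, dest):
#         r = requests.get(url, stream=True)
#         r.raise_for_status()
#         with open(dest, "wb") as fh:
#             for chunk in r.iter_content(chunk_size=1 << 16):
#                 fh.write(chunk)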
def run_mindustry():
global be_wrapper
global current_build
if not os.path.exists(os.path.join(be_wrapper, "Mindustry.jar")):
print("The Mindustry jar file does not exist. Download it now?")
if input("Update now? (Y/N):").lower() == "y":
update_mindustry()
else:
print("Exiting")
exit(0)
os.system("java -jar " + be_wrapper + "/Mindustry.jar")
try:
subprocess.check_call("java -version", shell=True)
except subprocess.CalledProcessError as x:
if not x.returncode == 127:
raise
response = requests.get("https://api.github.com/repos/Anuken/MindustryBuilds/releases/latest").json()
current_build = int(response['tag_name'])
home = os.path.expanduser("~")
be_wrapper = os.path.join(home, "BEWrapper")
if not os.path.exists(be_wrapper):
os.mkdir(be_wrapper)
try:
build_file = open(be_wrapper + "/last.txt", "r")
saved_build = int(build_file.read())
build_file.close()
except FileNotFoundError:
saved_build = 0
except ValueError:
saved_build = 0
if saved_build < current_build:
    print("Your Mindustry build seems to be out of date by " + str(current_build - saved_build) + " releases.")
    if input("Update now? (Y/N):").lower() == "y":
        update_mindustry()
else:
    print("Mindustry appears to be up to date!")
print("Running Mindustry")
run_mindustry()
|
ILiekMelons/MindustryBELauncher
|
main.py
|
main.py
|
py
| 1,958 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.system",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "wget.download",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "subprocess.check_call",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 48,
"usage_type": "call"
}
] |
20656199478
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create a Milo input file from a frequency calculation.
It must be a Gaussian 09 or 16 high-precision frequency calculation. You
request this with '# freq=(hpmodes) ... '.
"""
import argparse
import sys
from milo_1_0_3 import atom
from milo_1_0_3 import containers
from milo_1_0_3 import enumerations as enums
from milo_1_0_3 import exceptions
from milo_1_0_3 import program_state as ps
def main():
"""Parse frequency file and print to new Milo input."""
parser = argparse.ArgumentParser(description="Make a Milo input file "
"from a high-precision Gaussian frequency"
" calculation.\n")
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin, help="Frequency calculation file. "
"<stdin> by default.")
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
default=sys.stdout, help="New Milo input file. "
"<stdout> by default.")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Print other parameters in $job section. "
"-v for common parameters, -vv for all parameters")
args = parser.parse_args()
program_state = ps.ProgramState()
try:
parse_gaussian_header(args.infile, program_state)
parse_gaussian_charge_spin(args.infile, program_state)
parse_gaussian_molecule_data(args.infile, program_state)
parse_gaussian_frequency_data(args.infile, program_state)
parse_gaussian_isotope_data(args.infile, program_state)
print_job_section(args.outfile, program_state, args.verbose)
print_output_comment(args.infile, args.outfile)
print_molecule_section(args.outfile, program_state)
print_frequency_data_section(args.outfile, program_state)
except Exception as e:
print("Oh no! It looks like there was an error!")
print("Error message:", e)
print("\nPython error details:")
raise
def parse_gaussian_header(input_iterable, program_state):
"""
Parse gaussian_header from frequency file.
Looking for:
******************************************
------------------------------
# opt freq=hpmodes m062x/3-21g
------------------------------
Result:
gaussian_header = 'm062x/3-21g'
"""
past_warning = False
lines = list()
for line in input_iterable:
if "*****" in line:
past_warning = True
if past_warning and "-----" in line:
for next_line in input_iterable:
if "-----" in next_line:
break
lines.append(next_line[1:].strip("\n"))
clean_line = "".join(lines).strip()
if "hpmodes" not in clean_line.casefold():
raise exceptions.InputError("Must be high-precision frequency "
"calculation. Use 'freq=hpmodes'.")
tokens = clean_line.split()
tokens = [x for x in tokens if "#" not in x
and "opt" not in x.casefold()
and "freq" not in x.casefold()]
program_state.gaussian_header = " ".join(tokens)
return
raise exceptions.InputError("Error parsing gaussian_header.")
def parse_gaussian_charge_spin(input_iterable, program_state):
"""
Parse charge and spin multiplicity from frequency file.
Looking for:
---------------------------------------------
Symbolic Z-matrix:
Charge = 0 Multiplicity = 1
O -0.19334 -0.19871 0.
"""
for line in input_iterable:
if "Charge =" in line:
program_state.charge = int(line.split()[2])
program_state.spin = int(line.split()[5])
return
raise exceptions.InputError("Error parsing charge and spin multiplicity.")
def parse_gaussian_molecule_data(input_iterable, program_state):
"""
Parse molecule data from frequency file.
Will pull the last "Standard orientation:" in the log file, or the last
"Input orientation:" if there is no "Standard orientation:" (for example,
if the nosymm keyword is used).
Looking for:
Standard orientation:
---------------------------------------------------------------------
Center Atomic Atomic Coordinates (Angstroms)
Number Number Type X Y Z
---------------------------------------------------------------------
"""
for line in input_iterable:
if "Harmonic frequencies (cm**-1)" in line:
return
if "Input orientation:" in line or "Standard orientation:" in line:
positions = containers.Positions()
for coordinate_line in input_iterable:
if ("Rotational constants" in coordinate_line or
"Distance matrix" in coordinate_line):
break
coordinates = coordinate_line.split()
if coordinates[0].isnumeric():
x = float(coordinates[3])
y = float(coordinates[4])
z = float(coordinates[5])
positions.append(x, y, z, enums.DistanceUnits.ANGSTROM)
program_state.input_structure = positions
raise exceptions.InputError("Error parsing molecule data.")
def parse_gaussian_frequency_data(input_iterable, program_state):
"""
Parse frequency data from frequency file.
Will pull the first time they are listed (with high-precision).
Looking for:
Frequencies --- 1682.1354 3524.4296 3668.7401
Reduced masses --- 1.0895 1.0389 1.0827
Force constants --- 1.8163 7.6032 8.5864
IR Intensities --- 52.8486 4.2243 0.3831
Coord Atom Element:
1 1 8 -0.00000 0.00000 -0.00000
2 1 8 0.00000 -0.00000 -0.07070
3 1 8 -0.07382 0.04553 -0.00000
1 2 1 0.00000 0.00000 0.00000
2 2 1 0.39258 0.60700 0.56106
3 2 1 0.58580 -0.36126 -0.42745
1 3 1 0.00000 -0.00000 0.00000
2 3 1 -0.39258 -0.60700 0.56106
3 3 1 0.58580 -0.36126 0.42745
Harmonic frequencies (cm**-1), IR intensities (KM/Mole), Raman scatt
activities (A**4/AMU), depolarization ratios for plane and unpolariz
"""
has_started = False
for line in input_iterable:
if "Frequencies ---" in line:
has_started = True
for frequency in line.split()[2:]:
program_state.frequencies.append(float(frequency),
enums.FrequencyUnits
.RECIP_CM)
elif "Reduced masses ---" in line:
for reduced_mass in line.split()[3:]:
program_state.reduced_masses\
.append(float(reduced_mass), enums.MassUnits.AMU)
elif "Force constants ---" in line:
for force_constant in line.split()[3:]:
program_state.force_constants\
.append(float(force_constant), enums.ForceConstantUnits
.MILLIDYNE_PER_ANGSTROM)
elif "Coord Atom Element:" in line:
data_in_columns = list()
for coordinate_line in input_iterable:
if ("Harmonic frequencies (cm**-1)" in coordinate_line
or " " in coordinate_line):
break
data_in_columns.append(coordinate_line.split()[3:])
data_in_rows = list(zip(*data_in_columns))
for frequency in data_in_rows:
program_state.mode_displacements.append(containers.Positions())
for x, y, z in zip(*[iter(frequency)] * 3):
program_state.mode_displacements[-1].append(float(x),
float(y), float(z), enums.DistanceUnits.ANGSTROM)
elif has_started and "activities (A**4/AMU)" in line:
return
raise exceptions.InputError("Error parsing frequency data.")
def parse_gaussian_isotope_data(input_iterable, program_state):
"""
Parse isotope and atomic number data from frequency file.
Looking for:
-------------------
- Thermochemistry -
-------------------
Temperature 298.150 Kelvin. Pressure 1.00000 Atm.
Atom 1 has atomic number 8 and mass 15.99491
Atom 2 has atomic number 1 and mass 1.00783
Atom 3 has atomic number 1 and mass 1.00783
Molecular mass: 18.01056 amu.
"""
for line in input_iterable:
if "Thermochemistry" in line:
atoms = list()
for mass_line in input_iterable:
if "Molecular mass" in mass_line:
break
split_line = mass_line.split()
if split_line[0] == "Atom":
atomic_number = int(split_line[5])
atoms.append(atom.Atom.from_atomic_number(atomic_number))
atoms[-1].change_mass(split_line[8])
program_state.atoms = atoms
return
raise exceptions.InputError("Error parsing isotope data.")
def print_section(output_iterable, section_name, inside):
"""Print a section to output_iterable."""
stdout = sys.stdout
sys.stdout = output_iterable
print(f"${section_name}")
print(inside)
print("$end")
print()
sys.stdout = stdout
def print_job_section(output_iterable, program_state, verbose):
"""
Print the $job section with gaussian_header from program_state.
verbose controls how other job parameters are printed.
"""
section = list()
section.append(" gaussian_header "
f"{program_state.gaussian_header}")
if verbose >= 1:
section.append(" # step_size 1.00 # in femtoseconds")
section.append(" # max_steps 100 # or no_limit")
section.append(" # temperature 298.15 # in kelvin")
section.append(" # phase bring_together n m"
" # or push_apart n m")
section.append(" # memory 24 # in GB")
section.append(" # processors 24")
section.append(" # random_seed generate # or an "
"integer")
if verbose >= 2:
section.append(" # oscillator_type quasiclassical")
section.append(" # geometry_displacement off")
section.append(" # rotational_energy off")
section.append(" # energy_boost off")
section.append(" # integration_algorithm verlet")
section.append(" # program gaussian16")
section.append(" # fixed_mode_direction n 1 # or n -1")
print_section(output_iterable, "job", "\n".join(section))
def print_molecule_section(output_iterable, program_state):
"""Print $molecule section with data from program_state."""
section = list()
section.append(f" {program_state.charge} {program_state.spin}")
for _atom, (x, y, z) in zip(program_state.atoms,
program_state.input_structure.as_angstrom()):
section.append(f" {_atom.symbol} {x:12.6f} {y:12.6f} {z:12.6f}")
print_section(output_iterable, "molecule", "\n".join(section))
section = list()
for i, _atom in enumerate(program_state.atoms, 1):
section.append(f" {i:< 3d} {_atom.mass:10.5f}")
print_section(output_iterable, "isotope", "\n".join(section))
def print_frequency_data_section(output_iterable, program_state):
"""Print $frequencies section with data from program_state."""
section = list()
for frequency, reduced_mass, force_constant, mode_displacement in zip(
program_state.frequencies.as_recip_cm(),
program_state.reduced_masses.as_amu(),
program_state.force_constants.as_millidyne_per_angstrom(),
program_state.mode_displacements):
section.append(f" {frequency:10.4f} {reduced_mass:7.4f} "
f"{force_constant:7.4f}")
for x, y, z in mode_displacement.as_angstrom():
section.append(f" {x:8.5f} {y:8.5f} {z:8.5f}")
section.append("\n")
section.pop()
print_section(output_iterable, "frequency_data", "".join(section))
def print_output_comment(input_iterable, output_iterable):
"""Print comment with frequency file name and date of parsing."""
from datetime import datetime
import os
comment = list()
comment.append(" Frequency and molecule data parsed ")
if input_iterable != sys.stdin:
comment.append("from ")
comment.append(os.path.basename(input_iterable.name))
comment.append(" ")
else:
try:
name = os.readlink('/proc/self/fd/0').split('/')[-1].split('.')[0]
comment.append("from ")
comment.append(name)
comment.append(" ")
except FileNotFoundError:
comment.append("from <stdin> ")
comment.append(datetime.now().strftime("on %d-%b-%Y at %X"))
print_section(output_iterable, "comment", "".join(comment))
if __name__ == "__main__":
main()
|
DanielEss-lab/milo
|
milo_1_0_3/tools/parse_frequencies.py
|
parse_frequencies.py
|
py
| 13,885 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "argparse.FileType",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.program_state.ProgramState",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.program_state",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.exceptions.InputError",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.exceptions",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.exceptions.InputError",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.exceptions",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.exceptions.InputError",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.exceptions",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.containers.Positions",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.containers",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.enumerations.DistanceUnits",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.enumerations",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.exceptions.InputError",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.exceptions",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.enumerations.FrequencyUnits",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.enumerations",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.enumerations.MassUnits",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.enumerations",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.enumerations.ForceConstantUnits",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.enumerations",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.containers.Positions",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.containers",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.enumerations.DistanceUnits",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.enumerations",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.exceptions.InputError",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.exceptions",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.atom.Atom.from_atomic_number",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.atom.Atom",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "milo_1_0_3.atom",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "milo_1_0_3.exceptions.InputError",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "milo_1_0_3.exceptions",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "sys.stdout",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 313,
"usage_type": "attribute"
},
{
"api_name": "os.readlink",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 323,
"usage_type": "name"
}
] |
19250997206
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
def SI(img, x, y, p):
    # Sum of the 2p x 2p window anchored just above-left of (x, y), clamped to [0, 255]
    val = np.sum(img[y-p:y+p, x-p:x+p])
    return min(max(val, 0), 255)
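# Hedged note: an equivalent vectorized form (up to the window convention,
# since the slice above spans y-p:y+p rather than a centred 2p+1 window) is:
#     box = cv2.boxFilter(img, -1, (2 * p, 2 * p), normalize=False)
#     SI_all = np.clip(box, 0, 255)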
#Read grayscale image and conversion to float64
img=np.float64(cv2.imread('../Image_Pairs/FlowerGarden2.png',0))
(h,w) = img.shape
print("Image dimension:",h,"rows x",w,"columns")
#Direct method
t1 = cv2.getTickCount()
direct_method = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
for y in range(1,h):
for x in range(1,w):
val = img[y, x] - img[y-1, x]
direct_method[y,x] = min(max(val,0),255)
t2 = cv2.getTickCount()
time = (t2 - t1)/ cv2.getTickFrequency()
print("Direct method:",time,"s")
plt.figure(figsize=(8, 6))
plt.imshow(direct_method, cmap='gray')
plt.title('Y derivate convolution - Direct method')
plt.axis('off')
plt.savefig("conv_direct_y_derivate.png", bbox_inches='tight')
plt.close()
#Method filter2D
t1 = cv2.getTickCount()
kernel = np.array([-1, 1])
filter2d_method = cv2.filter2D(img,-1,kernel)
t2 = cv2.getTickCount()
time = (t2 - t1)/ cv2.getTickFrequency()
print("Method filter2D :",time,"s")
plt.figure(figsize=(8, 6))
plt.imshow(filter2d_method, cmap='gray')
plt.title('Y derivate convolution - filter 2D')
plt.axis('off')
plt.savefig("conv_filter2D_y_derivate.png", bbox_inches='tight')
plt.close()
img_diff = filter2d_method - direct_method
plt.figure(figsize=(8, 6))
plt.imshow(img_diff, cmap='gray', vmax=255, vmin=0)
plt.title("Y derivate result difference between the direct and filter2D")
plt.axis('off')
plt.savefig("difference_y_derivate_direct-filter2D.png", bbox_inches='tight')
plt.close()
center_y = h // 2
center_x = w // 2  # was x // 2, which reused a leftover loop variable
p = 1
q = 50
SI_image = cv2.copyMakeBorder(img,0,0,0,0,cv2.BORDER_REPLICATE)
for i in range(-q//2, q//2 + 1, 1):
for j in range(-q//2, q//2 + 1, 1):
SI_image[center_y + i, center_x + j] = SI(img, center_y + i, center_x + j, p)
plt.figure(figsize=(8, 6))
plt.imshow(img, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.savefig("original_image.png", bbox_inches='tight')
plt.close()
plt.figure(figsize=(8, 6))
plt.imshow(SI_image, cmap='gray')
plt.title('SI Function with p=1 on a square of size 50 on the center')
plt.axis('off')
plt.savefig("SI_function.png", bbox_inches='tight')
plt.close()
|
gpspelle/image-mining
|
TP1/TP_Features_OpenCV/modified_Convolutions.py
|
modified_Convolutions.py
|
py
| 2,273 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.sum",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.getTickCount",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.copyMakeBorder",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.BORDER_REPLICATE",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.getTickCount",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.getTickFrequency",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "cv2.getTickCount",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.filter2D",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.getTickCount",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.getTickFrequency",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "cv2.copyMakeBorder",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cv2.BORDER_REPLICATE",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 80,
"usage_type": "name"
}
] |
32544533358
|
import json
import glob
from flask import Flask
import os
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app)
@app.route('/')
def DownloadMergedJson() -> dict:
    result = {}
    node_ids = []
    for f in glob.glob(os.path.join("..", "history_*.json")):
        print(str(f))
        # "../history_<id>.json".split('.') -> ['', '', '/history_<id>', 'json'],
        # so field 2 holds the stem and the node id follows the underscore
        node_id = str(f).split('.')[2].split('_')[1]
        node_ids.append(node_id)
        with open(f, "rb") as infile:
            result[node_id] = json.load(infile)
    result["all_nodes"] = node_ids
    return result
app.run()
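# --- Hedged usage sketch ---
# A client can fetch the merged history (assuming the default Flask port):
#     import requests
#     merged = requests.get("http://127.0.0.1:5000/").json()
#     print(merged["all_nodes"])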
|
SiyiGuo/COMP90020
|
pythonproxy/getNodeData.py
|
getNodeData.py
|
py
| 648 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
}
] |
33874326793
|
# 507/206 Homework 6 Part 2
import requests
from bs4 import BeautifulSoup
#### Part 2 ####
print('\n*********** PART 2 ***********')
print('Michigan Daily -- MOST READ\n')
### Your Part 2 solution goes here
html = requests.get('https://www.michigandaily.com/').text
soup = BeautifulSoup(html, 'html.parser')
# searching_div = soup.find('div', attrs = {"class":"panel-pane pane-mostread"})
# the view-dom-id hash below is volatile between page loads; see the sketch at the end of the file
searching_div = soup.find('div', attrs={'class': "view view-most-read view-id-most_read view-display-id-panel_pane_1 view-dom-id-99658157999dd0ac5aa62c2b284dd266"})
# print(searching_div)
mr = searching_div.select("ol li")
for li in mr:
print(li.text)
# print(mr)
# for a in mr:
# abstract = a.select("ol")
# print(abstract)
# for li in abstract:
# print(li)
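# --- Hedged alternative (sketch) ---
# A substring CSS attribute selector avoids hard-coding the volatile
# view-dom-id hash:
#     panel = soup.select_one('div[class*="view-most-read"]')
#     for li in panel.select("ol li"):
#         print(li.text)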
|
xckou/SI507-HW06-xckou
|
hw6_part2.py
|
hw6_part2.py
|
py
| 762 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
}
] |
25144954100
|
import requests
import collections
import csv
from bs4 import BeautifulSoup
from bs4.element import Tag
class ParseAnimals:
def __init__(self) -> None:
self.animals_names = {}
def parse(self) -> None:
"""
        Loop over category pages until the category letter reaches 'Я'.
        Saves the animals from each page in a dict: key - letter, value - list of all animals on the page.
"""
url = 'https://ru.wikipedia.org/wiki/Категория:Животные_по_алфавиту'
letter = ''
while letter != 'Я':
data = self._get_page(url)
self._parse_animal_on_page(data=data)
url = self._check_end_page(data=data)
            # deque(maxlen=1) keeps only the last inserted key; [0][0] is its first letter
            letter = collections.deque(self.animals_names, maxlen=1)[0][0]
print(letter)
self._get_csv()
def _get_page(self, url: str) -> Tag:
"""
        Request the page and return the 'mw-pages' content block
"""
request = requests.get(url)
soup = BeautifulSoup(request.text, 'lxml')
return soup.find('div', id='mw-pages')
def _parse_animal_on_page(self, data: Tag) -> None:
"""
Saves all animals on the page in a dict with key = category (letter)
"""
for el in data.find_all('div', class_='mw-category-group'):
category = el.h3.text
animal_names = [[i.text, f"https://ru.wikipedia.org{i.a['href']}"] for i in el.find_all('li')]
if not self.animals_names.get(category):
self.animals_names[category] = []
self.animals_names[category] = self.animals_names[category] + animal_names
def _check_end_page(self, data: Tag) -> str:
"""
        Return the URL of the next page
"""
hrf = data.find_all('a')[-1]
return f"https://ru.wikipedia.org{hrf['href']}"
def _get_csv(self) -> None:
"""
        Save the per-letter counts into a csv file
"""
with open('animals_names_count.csv', 'w') as f:
writer = csv.writer(f)
            # one row per letter: [letter, number of animals]
            writer.writerows([[k, len(v)] for k, v in self.animals_names.items()])
if __name__ == '__main__':
parse = ParseAnimals()
parse.parse()
|
enamsaraev/tetrika-test
|
task2/solution.py
|
solution.py
|
py
| 2,239 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bs4.element.Tag",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "bs4.element.Tag",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "bs4.element.Tag",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 66,
"usage_type": "call"
}
] |
33273926923
|
from PDBParseBase import PDBParserBase  # get_site_header_seq_info
import time, os, datetime, logging, gzip, pickle  # get_site_header_seq_info
def mkdir(path):
    # Create the folder if it does not already exist
    isExists = os.path.exists(path)
    if not isExists:
        os.makedirs(path)
        print(path + " Created folder successfully!")
        return True
    else:
        # print("this path already exists")
        return False
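# Hedged note: the standard library covers this in one race-free call:
#     os.makedirs(path, exist_ok=True)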
def get_site_header_seq_info(rootdir,savefilepath):
"""extract header\sequence\site\remark800 info in rootdir.
and then, save them as a pickle content with list[1_site,2_header,3_sequence]
rootdir = "/home/RaidDisk/pdbfiles/updb"
savefilepath = "/home/zhaiyh884/20190614_new_data/0615_data"
scan all pdb files need about 60 min.
"""
count = 0
counter_mem = 0
pdbbase = PDBParserBase()
pdb_seq_info = pdbbase.get_sequence_fromATOM('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
print(pdb_seq_info)
"""
#test cetern item
pdb_header_info = pdbbase.get_header_info('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
pdb_site_info = pdbbase.get_site_info('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
pdb_seq_info = pdbbase.get_sequence_fromSEQ('/home/RaidDisk/pdbfiles/updb/pdb/a2/pdb2a2q.ent')
print(pdb_header_info)
print(pdb_site_info)
print(pdb_seq_info) """
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
data_3_items = []
            # analyze the data
pdb_site_info = pdbbase.get_site_info(os.path.join(parent,filename))
data_3_items.append(pdb_site_info)
            if not pdb_site_info:
                print("do not have site: " + filename)
                # skip early to save some time
                continue
pdb_header_info = pdbbase.get_header_info(os.path.join(parent,filename))
data_3_items.append(pdb_header_info)
#print(pdb_header_info)
pdb_seq_from_SEQ__info = pdbbase.get_sequence_fromSEQ(os.path.join(parent,filename))
data_3_items.append(pdb_seq_from_SEQ__info)
print("pdb_seq_from_SEQ__info")
print(pdb_seq_from_SEQ__info)
pdb_seq_from_ATOM_info = pdbbase.get_sequence_fromATOM(os.path.join(parent,filename))
data_3_items.append(pdb_seq_from_ATOM_info)
print("pdb_seq_from_ATOM_info")
print(pdb_seq_from_ATOM_info)
#save data
if not pdb_site_info :
pass
else:
dirname = filename[4:6]
new_Filepath = savefilepath +"/" + str(dirname)+"/"
mkdir(new_Filepath)
new_filename = filename[3:7] + ".pickle"
with open(new_Filepath + new_filename,"wb") as dbFile:
pickle.dump(data_3_items,dbFile)
"""with open(new_Filepath + new_filename,"rb") as dbFile:
file = pickle.load(dbFile) """
pass
pass
def find_memberain_protein(rootdir,savefilepath):
    # Find all proteins whose header classification contains "MEM" and copy them into savefilepath
count = 0
counter_mem = 0
pdbbase = PDBParserBase()
pdb_header_info = {}
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
count = count + 1
dirname = filename[3:7]
pdb_header_info = pdbbase.get_header_info(os.path.join(parent,filename))
if "MEM" in pdb_header_info["HEADER_classification"]:
counter_mem = counter_mem + 1
cmd = 'cp ' + str(os.path.join(parent,filename)) + ' ' + str(os.path.join(savefilepath,filename))
os.system(cmd)
pass
def find_all_sites(rootdir):
# use the pickles that contain header\sequence\site\remark800 info
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#
total_site = []
description_null = 0
for parent, dirnames, filenames in os.walk(rootdir):
for filename in filenames:
# walk into one file
pro_id = filename[0:4]
file_path = os.path.join(parent, filename)
with open(file_path, "rb") as dbFile:
file = pickle.load(dbFile)
# print(file)
site = file[0]
header = file[1]
seq = file[2]
item_dict = {}
for item in site:
#{'1KMH_A': [{'position': {'51': 'G', '65': 'L', '131': 'E', '274': 'M', '297': 'R'}, 'site_description': 'RESIDUE TTX B 499'}],
# '1KMH_B': [{'position': {'81': 'A', '82': 'T', '83': 'D'}, 'site_description': 'RESIDUE TTX B 499'}]}
#in this loop item means protein_sequence name: 1KMH_A
sites = []
for site_item in site[item]:
#site_item means every record in one sequence
description = site_item["site_description"]
descriptions = description.split()
try:
#find the binding name
if "res" in descriptions[1]:
sites.append(descriptions[2])
else:
sites.append(descriptions[1])
except IndexError:
description_null = description_null+1
item_dict[item] = sites
total_site.append(item_dict)
# print(total_site)
print("len(membrane_total_site):")
print(len(total_site))
print("null_discription:")
print(description_null)
with open("/home/zhaiyh884/20190614_new_data/total_site.pickle", "wb") as dbFile:
pass
pickle.dump(total_site, dbFile)
pass
def count_sites(file):
    # Used to count sites and analyze the data.
    # file = "/home/zhaiyh884/20190614_new_data/total_site.pickle"
    # Scan all the data and count each site's appearance.
with open(file, "rb") as dbFile:
file = pickle.load(dbFile)
site_dicts = {}
total_site_num = 0
for item in file:
for seq_id in item:
#print(seq_id)
for site_name in item[seq_id]:
print(site_name)
site_dicts[site_name] = site_dicts[site_name] + 1 if site_name in site_dicts else 1
total_site_num = total_site_num + 1
#if site_name in site_dicts.keys():
# site_dicts[site_name] = site_dicts[site_name] + 1 if site_name in site_dicts else 1
#print(site_name)
with open("/home/zhaiyh884/20190614_new_data/site_numbers.pickle", "wb") as dbFile:
pickle.dump(site_dicts, dbFile)
print(site_dicts)
print("total_site_num:")
print(total_site_num)
print("site_dicts items num:")
print(len(site_dicts))
def sites_anylize():
with open("site_numbers.pickle","rb") as dbFile:
file = pickle.load(dbFile)
with open("hetlist.pickle","rb") as dbFile_drug:
file_drug = pickle.load(dbFile_drug)
sites_number = 0
number_counter = {}
drug_site = {}
for site_name in file:
if site_name in file_drug:
sites_number = sites_number + file[site_name]
# used to count all numbers of drugs_binding object
number_of_site = file[site_name]
# number_of_site used to sign the numbers which apperence
number_counter[number_of_site] = number_counter[number_of_site] + 1 if number_of_site in number_counter else 1
# the dict to store the number of times
drug_site[site_name] = file[site_name]
#form a new site of drug sites
print(sites_number)
print(number_counter)
print(sorted(file.items(),key=lambda x:x[1]))
print("#@!#!$!@#%!#%")
print(sorted(drug_site.items(),key=lambda x:x[1]))
pass
def find_memberain_sites(rootdir):
# use the pickles that contain header\sequence\site\remark800 info
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#
total_site = []
description_null = 0
for parent, dirnames, filenames in os.walk(rootdir):
for filename in filenames:
# walk into one file
pro_id = filename[0:4]
file_path = os.path.join(parent, filename)
with open(file_path, "rb") as dbFile:
file = pickle.load(dbFile)
# print(file)
site = file[0]
header = file[1]
seq = file[2]
#select membrane protein
if "MEM" not in header["HEADER_classification"]:
continue
# use site info only
item_dict = {}
for item in site:
#{'1KMH_A': [{'position': {'51': 'G', '65': 'L', '131': 'E', '274': 'M', '297': 'R'}, 'site_description': 'RESIDUE TTX B 499'}],
# '1KMH_B': [{'position': {'81': 'A', '82': 'T', '83': 'D'}, 'site_description': 'RESIDUE TTX B 499'}]}
#in this loop item means protein_sequence name: 1KMH_A
sites = []
for site_item in site[item]:
#site_item means every record in one sequence
description = site_item["site_description"]
descriptions = description.split()
try:
#find the binding name
if "res" in descriptions[1]:
sites.append(descriptions[2])
else:
sites.append(descriptions[1])
except IndexError:
description_null = description_null+1
item_dict[item] = sites
total_site.append(item_dict)
# print(total_site)
print("len(membrane_total_site):")
print(len(total_site))
print("null_discription:")
print(description_null)
with open("/home/zhaiyh884/20190614_new_data/membrane_total_site.pickle", "wb") as dbFile:
pass
pickle.dump(total_site, dbFile)
pass
def find_drug_releated_protein(rootdir):
# use the pickles that contain header\sequence\site\remark800 info
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
    # Compare the classification distribution of all proteins against drug-related proteins
with open("hetlist.pickle","rb") as dbFile_drug:
file_drug = pickle.load(dbFile_drug)
protein_classfication = []
drug_protein_classfication = []
protein_dict = {}
drug_releated_protein_dict = {}
description_null = 0
for parent, dirnames, filenames in os.walk(rootdir):
for filename in filenames:
# walk into one file
pro_id = filename[0:4]
file_path = os.path.join(parent, filename)
with open(file_path, "rb") as dbFile:
file = pickle.load(dbFile)
# print(file)
site = file[0]
header = file[1]
seq = file[2]
classification = header["HEADER_classification"]
protein_dict[classification] = protein_dict[classification] + 1 if classification in protein_dict else 1
#print(protein_dict)
drug_releated_protein_flag = 0
for item in site:
#{'1KMH_A': [{'position': {'51': 'G', '65': 'L', '131': 'E', '274': 'M', '297': 'R'}, 'site_description': 'RESIDUE TTX B 499'}],
# '1KMH_B': [{'position': {'81': 'A', '82': 'T', '83': 'D'}, 'site_description': 'RESIDUE TTX B 499'}]}
#in this loop item means protein_sequence name: 1KMH_A
sites = []
for site_item in site[item]:
#site_item means every record in one sequence
description = site_item["site_description"]
descriptions = description.split()
                    binding_object = None  # avoid reusing the previous iteration's value on IndexError
                    try:
                        # find the binding_object name
                        if "res" in descriptions[1]:
                            binding_object = descriptions[2]
                        elif "RESIDUES" in description and "THROUGH" in description:
                            binding_object = descriptions[2]
                        else:
                            binding_object = descriptions[1]
                    except IndexError:
                        description_null = description_null + 1
#sites.append(binding_object)
if binding_object in file_drug:
drug_releated_protein_flag = 1
#print(drug_releated_protein_flag)
if drug_releated_protein_flag ==1:
drug_releated_protein_dict[classification] = drug_releated_protein_dict[classification] + 1 if classification in drug_releated_protein_dict else 1
"""item_dict[item] = sites
total_site.append(item_dict)
# print(total_site)
print("len(membrane_total_site):")
print(len(total_site))
print("null_discription:")
print(description_null)"""
print(protein_dict)
print("!@#$!@#################$!@%#!$^%$#@^$%&^#$%&")
print(drug_releated_protein_dict)
with open("/home/zhaiyh884/20190614_new_data/drug_and_nondrug_protein_classfication.pickle", "wb") as dbFile:
pass
pickle.dump(protein_dict, dbFile)
pickle.dump(drug_releated_protein_dict, dbFile)
pass
if __name__ == "__main__":
start = datetime.datetime.now()
#1 extract all needed infomation from pdb
rootdir = "/home/RaidDisk/pdbfiles/updb"
savefilepath = "/home/zhaiyh884/20190614_new_data/0615_data"
get_site_header_seq_info(rootdir,savefilepath)
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#2 find_all_site
#find_all_sites(rootdir)
#2 or find_memberain_sites
#find_memberain_sites(rootdir)
#3 count site numbers
#file = "/home/zhaiyh884/20190614_new_data/membrane_total_site.pickle"
#file = "/home/zhaiyh884/20190614_new_data/total_site.pickle"
#count_sites(file)
#4
#sites_anylize()
#5
#rootdir = "/home/zhaiyh884/20190614_new_data/0615_data"
#find_drug_releated_protein(rootdir)
end = datetime.datetime.now()
print("alltime = ")
print (end-start)
|
Rio56/deeplearning
|
DTP_deeplearning/0618_新数据处理代码及文件/drug_target_data_0617.py
|
drug_target_data_0617.py
|
py
| 15,010 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PDBParseBase.PDBParserBase",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "PDBParseBase.PDBParserBase",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 393,
"usage_type": "attribute"
}
] |
20677953442
|
from django.test import TestCase
from django.urls import reverse
from apps.shop.models import Product
from apps.users.models import CustomUser
from .models import Order
test_order = {"name": "Django Django", "email": "[email protected]", "paid": True}
test_product = {
    "name": "Test Product",
    "abbr": "TEPR",
    "slug": "tepr",
    "description": "Test Product description",
    "price": 2000,
}
normal_user = {"username": "normal", "email": "[email protected]", "password": "foo"}
# Create your tests here.
class TestOrderModelCreation(TestCase):
"""Test Product Model Creation"""
def setUp(self):
self.test_order = test_order
self.test_product = test_product
Order.objects.create(
**self.test_order, product=Product.objects.create(**self.test_product)
)
def test_order_model_created(self):
obj = Order.objects.get(name=self.test_order["name"])
self.assertEqual(obj.name, self.test_order["name"])
self.assertEqual(obj.email, self.test_order["email"])
self.assertEqual(obj.paid, self.test_order["paid"])
self.assertEqual(obj.product.name, self.test_product["name"])
class TestOrderCreateView(TestCase):
"""Test Order Create View"""
def setUp(self):
self.test_order = test_order
self.test_product = test_product
self.test_user = normal_user
CustomUser.objects.create_user(**self.test_user)
Order.objects.create(
**self.test_order, product=Product.objects.create(**self.test_product)
)
def test_order_create_view(self):
response = self.client.get(reverse("order_create"))
self.assertTemplateUsed(response, "orders/order_form.html")
self.assertEqual(response.status_code, 200)
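
    # Assumption: the view exposes the user flagged as main_user in the template
    # context under the key "main_author" (e.g. via a context processor).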
    def test_main_author(self):
        main_author = CustomUser.objects.get(username=self.test_user["username"])
        main_author.main_user = True
        main_author.save()
        response = self.client.get(reverse("order_create"))
        self.assertEqual(response.context["main_author"], main_author)
class TestSuccessView(TestCase):
"""Test Success View"""
def setUp(self):
self.test_user = normal_user
CustomUser.objects.create_user(**self.test_user)
def test_order_success_view(self):
response = self.client.get(reverse("success_created"))
self.assertTemplateUsed(response, "orders/success_created.html")
self.assertEqual(response.status_code, 200)
def test_main_author(self):
main_author = CustomUser.objects.get(username=self.test_user["username"])
main_author.main_user = True
main_author.save()
response = self.client.get(reverse("success_created"))
self.assertEqual(response.context["main_author"], main_author)
|
akundev/akundotdev
|
apps/orders/tests.py
|
tests.py
|
py
| 2,822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.create",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "apps.shop.models.Product.objects.create",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "apps.shop.models.Product.objects",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "apps.shop.models.Product",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.test.TestCase",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "apps.users.models.CustomUser.objects.create_user",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "apps.users.models.CustomUser.objects",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "apps.users.models.CustomUser",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "models.Order.objects.create",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "models.Order.objects",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "models.Order",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "apps.shop.models.Product.objects.create",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "apps.shop.models.Product.objects",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "apps.shop.models.Product",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "apps.users.models.CustomUser.objects.get",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "apps.users.models.CustomUser.objects",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "apps.users.models.CustomUser",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.test.TestCase",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "apps.users.models.CustomUser.objects.create_user",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "apps.users.models.CustomUser.objects",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "apps.users.models.CustomUser",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "apps.users.models.CustomUser.objects.get",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "apps.users.models.CustomUser.objects",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "apps.users.models.CustomUser",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 86,
"usage_type": "call"
}
] |
24363848040
|
# This script should be executed inside a NetAddiction Odoo 9 shell.
import json
import os
def remove_duplicate_attributes(product):
    seen_ids = set()
    duplicate_list = []
    for attr in product.attribute_value_ids:
        if attr.attribute_id.id not in seen_ids:
            seen_ids.add(attr.attribute_id.id)
        else:
            duplicate_list.append(attr)
    if duplicate_list:
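        # (3, id) is the Odoo x2many command that unlinks the record with the
        # given id from the relation without deleting the record itself.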
product.write({"attribute_value_ids": [(3, attr.id) for attr in duplicate_list]})
return duplicate_list
duplicates = []
products = self.env["product.product"].search([])
for count, product in enumerate(products):
    duplicate = remove_duplicate_attributes(product)
    if duplicate:
        print(duplicate)
        duplicates.append(
            {
                "product_id": product.id,
                "duplicates": [{"name": a.name, "type": a.attribute_id.display_name} for a in duplicate],
            }
        )
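    # Commit every 100 products so a crash does not roll back all completed writes.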
    if not count % 100:
        self._cr.commit()
self._cr.commit()
if duplicates:
with open("~/duplicates_found.json", "w") as fp:
json.dump(duplicates, fp, sort_keys=True, indent=4, separators=(",", ": "))
|
suningwz/netaddiction_addons
|
scripts/remove_duplicates_attribute.py
|
remove_duplicates_attribute.py
|
py
| 1,150 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "json.dump",
"line_number": 37,
"usage_type": "call"
}
] |
44938119316
|
import numpy as np
import redis
import struct
import cv2
import time
import curved_paths_coords as pc
from threading import Thread
r = redis.Redis(host='192.168.0.101', port=6379, db=0)
log_sensing_running =\
log_navigation_running =\
log_batterymeter_running =\
log_driving_running =\
log_detect_cam =\
voltages1_and_2 =\
log_sensing_time=\
log_target_distance_angle=\
log_path=\
log_path_min_cost=\
log_current_speed=\
log_in_front_of_car=\
log_uptime =\
path_received =\
received_target_coords = None
mapW = 400
mapH = 400
last_time=0
font = cv2.FONT_HERSHEY_SIMPLEX
map_refresh = 0.1 # interval between map refresh
map = np.full((mapW,mapH,3),100, np.uint8)
def redis_to_map(redis,name):
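    # The stored value is assumed to be 8 bytes of big-endian uint32 (height,
    # width) followed by the raw single-channel pixel buffer.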
    encoded = redis.get(name)
    if encoded is None:
        return np.full((mapW, mapH, 3), 100, np.uint8)
    else:
        h, w = struct.unpack('>II', encoded[:8])
        array = np.frombuffer(encoded, dtype=np.uint8, offset=8).reshape(h, w, 1)
        array = cv2.cvtColor(array, cv2.COLOR_GRAY2RGB)
        return array
def update_data():
    # All of these names are module-level state read by display_data(); without
    # the global declarations every assignment below would only bind locals.
    global log_sensing_time, log_target_distance_angle, log_path, log_path_min_cost
    global log_current_speed, log_in_front_of_car, voltages1_and_2, log_uptime
    global log_sensing_running, log_navigation_running, log_batterymeter_running
    global log_driving_running, log_detect_cam, map, path_received, received_target_coords
    log_sensing_time_received = r.get('log_sensing_time')
    if log_sensing_time_received is not None:
        log_sensing_time = round(float(log_sensing_time_received), 2)
    else:
        log_sensing_time = 0
    log_target_distance_received = r.get('log_target_distance')
    if log_target_distance_received is not None:
        log_target_distance = round(float(log_target_distance_received), 2)
    else:
        log_target_distance = "None"
    log_target_angle_received = r.get('log_target_angle')
    if log_target_angle_received is not None:
        log_target_angle = round(float(log_target_angle_received), 2)
    else:
        log_target_angle = "None"
    log_target_distance_angle = str(log_target_distance) + " " + str(log_target_angle)
    log_path_received = r.get('path')
    if log_path_received is not None:
        log_path = float(log_path_received)
    else:
        log_path = "None"
    log_path_min_cost_received = r.get('path_min_cost')
    if log_path_min_cost_received is not None:
        log_path_min_cost = round(float(log_path_min_cost_received), 2)
    else:
        log_path_min_cost = "None"
    log_current_speed_received = r.get('current_speed')
    if log_current_speed_received is not None:
        log_current_speed = round(float(log_current_speed_received), 2)
    else:
        log_current_speed = "None"
    log_in_front_of_car_received = r.get('log_in_front_of_car')
    if log_in_front_of_car_received is not None:
        log_in_front_of_car = float(log_in_front_of_car_received)
    else:
        log_in_front_of_car = "None"
    voltages_received = r.get('voltages')
    if voltages_received is not None:
        voltages = np.round(np.array(struct.unpack('%sf' % 2, voltages_received)), 2)
    else:
        voltages = [0, 0]
    voltages1_and_2 = str(voltages[0]) + " " + str(voltages[1])
    log_uptime_received = r.get('log_uptime')
    if log_uptime_received is not None:
        log_uptime = int(float(log_uptime_received))
    else:
        log_uptime = "None"
    log_sensing_running_received = r.get('log_sensing_running')
    if log_sensing_running_received is not None:
        log_sensing_running = str(log_sensing_running_received.decode("utf-8"))
    else:
        log_sensing_running = "off"
    log_navigation_running_received = r.get('log_navigation_running')
    if log_navigation_running_received is not None:
        log_navigation_running = str(log_navigation_running_received.decode("utf-8"))
    else:
        log_navigation_running = "off"
    log_batterymeter_running_received = r.get('log_batterymeter_running')
    if log_batterymeter_running_received is not None:
        log_batterymeter_running = str(log_batterymeter_running_received.decode("utf-8"))
    else:
        log_batterymeter_running = "off"
    log_driving_running_received = r.get('log_driving_running')
    if log_driving_running_received is not None:
        log_driving_running = str(log_driving_running_received.decode("utf-8"))
    else:
        log_driving_running = "off"
    log_detect_cam_received = r.get('log_detect_cam')
    if log_detect_cam_received is not None:
        log_detect_cam = str(log_detect_cam_received.decode("utf-8"))
    else:
        log_detect_cam = "None"
    map = redis_to_map(r, "map")
    path_received = r.get('path')
    received_target_coords = r.get('target_car_coords')
def display_data():
    cv2.rectangle(map, (187, 242), (213, 305), (0, 100, 255), -1)  # draw car
    visible_cone = np.array([[213, 242], [187, 242], [0, 0], [400, 0]], np.int32)
    visible_cone = visible_cone.reshape((-1, 1, 2))
    cv2.polylines(map, [visible_cone], True, (255, 255, 255), 1)
    color_path = (0, 255, 0)
    if path_received is None:
        pass
    elif int(path_received) == -1:
        pass
    else:
        path = int(path_received)
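        # Paths above 5 appear to be mirror images of paths 0-5 (an assumption
        # from the lookup below): reuse the stored coordinates, flip x via l.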
        if path > 5:
            path_lookup = path - 5
            l = -1
        else:
            path_lookup = path
            l = 1
        for square in range(0, 4):
            #print(path, square)
            x0 = int(l * pc.paths[path_lookup]['coords'][square][0] / 10 + mapW / 2)
            y0 = mapH - int(pc.paths[path_lookup]['coords'][square][1] / 10 + 150)
            x1 = int(l * pc.paths[path_lookup]['coords'][square][2] / 10 + mapW / 2)
            y1 = mapH - int(pc.paths[path_lookup]['coords'][square][3] / 10 + 150)
            x2 = int(l * pc.paths[path_lookup]['coords'][square + 1][0] / 10 + mapW / 2)
            y2 = mapH - int(pc.paths[path_lookup]['coords'][square + 1][1] / 10 + 150)
            x3 = int(l * pc.paths[path_lookup]['coords'][square + 1][2] / 10 + mapW / 2)
            y3 = mapH - int(pc.paths[path_lookup]['coords'][square + 1][3] / 10 + 150)
            poly = np.array([[x0, y0], [x1, y1], [x3, y3], [x2, y2]])
            poly = poly.reshape((-1, 1, 2))
            cv2.polylines(map, [poly], True, (255, 255, 255), 1)
    if received_target_coords is not None:
        target_car_coords = np.array(struct.unpack('%sf' % 3, received_target_coords))
        mx = int(target_car_coords[0] * 100 + mapW / 2)
        my = int(mapH - target_car_coords[2] * 100)
        cv2.line(map, (int(mapW / 2), mapH - 150), (mx, my - 150), (0, 0, 255), thickness=3)
    topic_left = ['sensing',
                  'navigation',
                  'batterymeter',
                  'driving',
                  'detect cam',
                  ]
    logs_left = [log_sensing_running,
                 log_navigation_running,
                 log_batterymeter_running,
                 log_driving_running,
                 log_detect_cam,
                 ]
    topic_right = ['battery voltages',
                   'sensing time',
                   'target dist, angle',
                   'current path',
                   'path min cost',
                   'current speed',
                   'obstacle height',
                   'uptime',
                   ]
    logs_right = [voltages1_and_2,
                  log_sensing_time,
                  log_target_distance_angle,
                  log_path,
                  log_path_min_cost,
                  log_current_speed,
                  log_in_front_of_car,
                  log_uptime,
                  ]
    count = 1
    for text in topic_left:
        count += 1
        cv2.putText(map, str(text), (20, 300 + 10 * count), font, 0.4, (255, 255, 255), 1)
    count = 1
    for text in logs_left:
        count += 1
        cv2.putText(map, str(text), (140, 300 + 10 * count), font, 0.4, (255, 255, 255), 1)
    count = 1
    for text in topic_right:
        count += 1
        cv2.putText(map, str(text), (187, 300 + 10 * count), font, 0.4, (255, 255, 255), 1)
    count = 1
    for text in logs_right:
        count += 1
        cv2.putText(map, str(text), (310, 300 + 10 * count), font, 0.4, (255, 255, 255), 1)
def try_to_connect():
    global connected_to_redis  # the flag is module-level state read by display()
    while True:
        try:
            cv2.namedWindow('map', cv2.WINDOW_NORMAL)
            display_data()
            cv2.imshow('map', map)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
            r.ping()
        except redis.exceptions.ConnectionError:
            print("retrying connection")
            cv2.putText(map, "No connection to car", (35, 150), font, 1, (0, 0, 255), 3)
            continue
        else:
            break
    print("connected")
    connected_to_redis = True
connected_to_redis = False
def display():
    global connected_to_redis
    while True:
        try:
            update_data()
            print("here")
            cv2.namedWindow('map', cv2.WINDOW_NORMAL)
            display_data()
            cv2.imshow('map', map)
            if not connected_to_redis:
                try_to_connect()
            time.sleep(map_refresh)
        except redis.exceptions.ConnectionError:
            try_to_connect()
x = Thread(target=display, args=())
x.start()
|
julianx4/skippycar
|
test.py
|
test.py
|
py
| 8,784 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "redis.Redis",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.full",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "redis.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "struct.unpack",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.frombuffer",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_GRAY2RGB",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "cv2.polylines",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "curved_paths_coords.paths",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "cv2.polylines",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "redis.exceptions",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "cv2.putText",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_NORMAL",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "redis.exceptions",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 273,
"usage_type": "call"
}
] |
73264679548
|
from pyglet.text import Label
from audio import explosion
from fonts.fonts import press_start_2p
from interfaces.interface import Interface
from system import system
import menus.menu
import menus.game_over_menu
class GameOverInterface(Interface):
    game_over_label: Label = None
    game_over_menu: menus.menu.Menu = None

    def __init__(self):
        self.game_over_label = Label('GAME OVER', font_name=press_start_2p, font_size=48)
        self.game_over_label.anchor_x = 'center'
        self.game_over_label.anchor_y = 'center'
        self.game_over_menu = menus.game_over_menu.GameOverMenu()
        self.resize()
        window = system.get_window()
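        # Route keyboard input straight to the menu by replacing the window's handler.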
        window.on_key_press = self.game_over_menu.on_key_press
        self.game_over_menu.focused = True
        explosion.play()

    def on_draw(self):
        self.game_over_label.draw()
        self.game_over_menu.draw()

    def resize(self):
        window = system.get_window()
        self.game_over_menu.move(window.width / 2, 100)
        self.game_over_label.x = window.width / 2
        self.game_over_label.y = window.height / 2
|
KimPalao/Headshot
|
interfaces/game_over_interface.py
|
game_over_interface.py
|
py
| 1,110 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "interfaces.interface.Interface",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyglet.text.Label",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "menus.menu.menu",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "menus.menu",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pyglet.text.Label",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fonts.fonts.press_start_2p",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "menus.menu.game_over_menu.GameOverMenu",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "menus.menu.game_over_menu",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "menus.menu",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "system.system.get_window",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "system.system",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "audio.explosion.play",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "audio.explosion",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "system.system.get_window",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "system.system",
"line_number": 32,
"usage_type": "name"
}
] |
2600836089
|
# Import libraries
from sklearn.linear_model import LinearRegression
import numpy as np
import csv
from vnstock import *
data=[]
cp=listing_companies()
check='ngân hàng thương mại cổ phần'
nh=[]
for n in range(len(cp)):
    if check in cp.loc[n][2].lower():
        nh.append(cp.loc[n][0])
print(len(nh))
for ticket in nh:
    linkfile = './nganhang/' + ticket + '.csv'
    with open(linkfile) as file:
        fp = csv.reader(file)
        header = next(fp)
        for row in fp:
            data.append(row)
# Build the feature matrix and target vector from the loaded rows
K=[]
h=[]
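# Columns 1 and 2 of each CSV row serve as features; column 4 is the target value.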
for i in range(len(data)):
    K.append([float(data[i][1]), float(data[i][2])])
    h.append(float(data[i][4]))
# Create a linear regression model
model = LinearRegression()
# Train the model on the data
model.fit(K, h)
# Print the model coefficients
print('Coefficients:', model.coef_)
# Predict a new value
x_new = np.array([[48850.0,48222.0]])
y_new = model.predict(x_new)
print('Predicted value:', y_new)
|
vanvy102/code
|
Code-test/linear.py
|
linear.py
|
py
| 1,070 |
python
|
vi
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.reader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
}
] |