seq_id (string, length 7–11) | text (string, length 156–1.7M) | repo_name (string, length 7–125) | sub_path (string, length 4–132) | file_name (string, length 4–77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
13588905096
|
import numpy as np
from sklearn.decomposition import PCA
# Calculate the average of the list
def calculate_list_avg(lst):
if len(lst) == 0:
avg_list = 0.0
else:
avg_list = sum(lst) / len(lst)
return avg_list
# Extract the information for each sample
def extract_msg(mrna_exp_mat, tf_exp_mat, mirna_exp_mat, mrna_id_list, tf_id_list, mirna_id_list,
mrna_to_mrna_dict, tf_to_mrna_dict, mirna_to_mrna_dict_for_mrna,
mirna_to_mrna_dict_for_mirna, mirna_to_tf_dict, tf_to_mirna_dict):
mrna_num = len(mrna_id_list)
tf_num = len(tf_id_list)
mirna_num = len(mirna_id_list)
sample_num = mrna_exp_mat.shape[1]
mrna_feature_mat = np.zeros((mrna_num, sample_num))
mrna_to_mrna_feature_mat = np.zeros((mrna_num, sample_num))
tf_to_mrna_feature_mat = np.zeros((mrna_num, sample_num))
mirna_to_mrna_feature_mat_for_mrna = np.zeros((mrna_num, sample_num))
mirna_feature_mat = np.zeros((mirna_num, sample_num))
mirna_to_mrna_feature_mat_for_mirna = np.zeros((mirna_num, sample_num))
mirna_to_tf_feature_mat = np.zeros((mirna_num, sample_num))
tf_to_mirna_feature_mat = np.zeros((mirna_num, sample_num))
# extract the useful information for each sample
for sample_index in range(sample_num):
mrna_index = 0
mirna_index = 0
# mRNA/TF/miRNA expression data
# Format:{ID:exp}
mrna_id_exp_dict = {}
tf_id_exp_dict = {}
mirna_id_exp_dict = {}
# Read the mRNA expression data save in the dictionary
for i in range(mrna_num):
mrna_id = mrna_id_list[i]
mrna_exp = float(mrna_exp_mat[i][sample_index])
mrna_id_exp_dict[mrna_id] = mrna_exp
for i in range(tf_num):
tf_id = tf_id_list[i]
tf_exp = float(tf_exp_mat[i][sample_index])
tf_id_exp_dict[tf_id] = tf_exp
for i in range(mirna_num):
mirna_id = mirna_id_list[i]
mirna_exp = float(mirna_exp_mat[i][sample_index])
mirna_id_exp_dict[mirna_id] = mirna_exp
# mRNA feature matrix
for mrna in mrna_id_list:
mrna_exp = mrna_id_exp_dict[mrna]
mrna_to_mrna_exp_list = []
tf_to_mrna_exp_list = []
mirna_to_mrna_exp_list_for_mrna = []
for i in mrna_to_mrna_dict[mrna]:
mrna_to_mrna_exp_list.append(mrna_id_exp_dict[i])
for i in tf_to_mrna_dict[mrna]:
tf_to_mrna_exp_list.append(tf_id_exp_dict[i])
for i in mirna_to_mrna_dict_for_mrna[mrna]:
mirna_to_mrna_exp_list_for_mrna.append(mirna_id_exp_dict[i])
# calculate the average of the list
avg_mrna_to_mrna_exp = calculate_list_avg(mrna_to_mrna_exp_list)
avg_tf_to_mrna_exp = calculate_list_avg(tf_to_mrna_exp_list)
avg_mirna_to_mrna_exp_for_mrna = calculate_list_avg(mirna_to_mrna_exp_list_for_mrna)
mrna_feature_mat[mrna_index, sample_index] = mrna_exp
mrna_to_mrna_feature_mat[mrna_index, sample_index] = avg_mrna_to_mrna_exp
tf_to_mrna_feature_mat[mrna_index, sample_index] = avg_tf_to_mrna_exp
mirna_to_mrna_feature_mat_for_mrna[mrna_index, sample_index] = avg_mirna_to_mrna_exp_for_mrna
mrna_index += 1
# miRNA feature matrix
for mirna in mirna_id_list:
mirna_to_mrna_exp_list_for_mirna = []
mirna_to_tf_exp_list = []
tf_to_mirna_exp_list = []
mirna_exp = mirna_id_exp_dict[mirna]
for i in mirna_to_mrna_dict_for_mirna[mirna]:
mirna_to_mrna_exp_list_for_mirna.append(mrna_id_exp_dict[i])
for i in mirna_to_tf_dict[mirna]:
mirna_to_tf_exp_list.append(tf_id_exp_dict[i])
for i in tf_to_mirna_dict[mirna]:
tf_to_mirna_exp_list.append(tf_id_exp_dict[i])
# calculate the average of the list
avg_mirna_to_mrna_exp_for_mirna = calculate_list_avg(mirna_to_mrna_exp_list_for_mirna)
avg_mirna_to_tf_exp = calculate_list_avg(mirna_to_tf_exp_list)
avg_tf_to_mirna_exp = calculate_list_avg(tf_to_mirna_exp_list)
mirna_feature_mat[mirna_index, sample_index] = mirna_exp
mirna_to_mrna_feature_mat_for_mirna[mirna_index, sample_index] = avg_mirna_to_mrna_exp_for_mirna
mirna_to_tf_feature_mat[mirna_index, sample_index] = avg_mirna_to_tf_exp
tf_to_mirna_feature_mat[mirna_index, sample_index] = avg_tf_to_mirna_exp
mirna_index += 1
return mrna_feature_mat, mrna_to_mrna_feature_mat, tf_to_mrna_feature_mat, mirna_to_mrna_feature_mat_for_mrna, \
mirna_feature_mat, mirna_to_mrna_feature_mat_for_mirna, mirna_to_tf_feature_mat, tf_to_mirna_feature_mat
# Use PCA to reduce dimension
def get_dim(total_ratio, temp_mat):
pca = PCA(n_components=total_ratio, svd_solver='full')
pca.fit_transform(temp_mat)
main_dim = pca.n_components_
return main_dim
# Use PCA to reduce dimension
def reduce_dim(dim, temp_mat):
pca = PCA(n_components=dim)
reduce_dim_mat = pca.fit_transform(temp_mat)
return reduce_dim_mat.T
|
yiangcs001/CSPRV
|
extract_features.py
|
extract_features.py
|
py
| 5,370 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36814841108
|
import importlib
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_assets import Environment
from flask_socketio import SocketIO
from config import config
db = SQLAlchemy()
migrate = Migrate()
assets = Environment()
socketio = SocketIO()
def create_app(config_name='default'):
    app = Flask(__name__, static_url_path='')
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    __register_extensions(app, [db, assets, socketio])
    __register_blueprints(app, ['live'])
    from app.utils.assets import bundles
    assets.register('main_css', bundles['main_css'])
    assets.register('main_js', bundles['main_js'])
    from app.cli import test, coverage, clean, lint
    app.cli.add_command(test)
    app.cli.add_command(coverage)
    app.cli.add_command(clean)
    app.cli.add_command(lint)
    return app
def __register_extensions(app, extensions):
    for extension in extensions:
        extension.init_app(app)
    migrate.init_app(app, db)
def __register_blueprints(app, modules):
    for module in modules:
        bp = getattr(importlib.import_module(f'app.{module}'), 'bp')
        app.register_blueprint(bp)
|
reaper47/weather
|
app/__init__.py
|
__init__.py
|
py
| 1,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31871823537
|
from http.server import HTTPServer, SimpleHTTPRequestHandler, BaseHTTPRequestHandler, test
import json
import io, shutil,urllib
from raidtool import get_models
host = ('localhost', 8888)
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET')
self.send_header('Cache-Control', 'no-store, no-cache, must-revalidate')
return super(CORSRequestHandler, self).end_headers()
def do_GET(self):
self.queryString = urllib.parse.unquote(self.path.split('?',1)[1])
params = urllib.parse.parse_qs(self.queryString)
print(params)
PID = int(params['pid'][0])
EC = int(params['ec'][0])
IVs = list(map(lambda x: int(x), params['IVs'][0].split(",")))
usefilters = False if int(params['usefilters'][0]) == 0 else True
MaxResults = int(params['maxResults'][0])
flawlessiv = int(params['flawlessiv'][0])
HA = int(params['ha'][0])
RandomGender = int(params['randomGender'][0])
IsShinyType = False if int(params['isShinyType'][0]) == 0 else True
data = {
'result': 'this is a test',
'filter': get_models(
PID,
EC,
IVs,
usefilters,
MaxResults,
flawlessiv,
HA,
RandomGender,
IsShinyType
)
}
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(data).encode())
if __name__ == '__main__':
server = HTTPServer(host, CORSRequestHandler)
print("Starting server, listen at: %s:%s" % host)
server.serve_forever()
|
a1992012015/find-tool
|
tool/api.py
|
api.py
|
py
| 1,861 |
python
|
en
|
code
| 14 |
github-code
|
6
|
43535235941
|
# function which returns the correct number entered by the user
def validate(numberPosition):
    correctNumberEntered = False
    while correctNumberEntered == False:
        # Exception handling using try/except block
        try:
            if numberPosition == 1:
                # Converting the entered number to int datatype
                number = int(input("\nEnter the first number in decimal number system: "))
            else:
                number = int(input("\nEnter the second number in decimal number system: "))
            # Returning the number if it is in range
            if number >= 0 and number < 256:
                return number
            # Notifying the user that the number they entered was negative
            elif number < 0:
                print("Please enter positive numbers only. Please try again:")
                continue
            # Notifying the user that the number they entered exceeds the range
            elif number > 255:
                print("Please enter numbers between 0 and 255 only. Please try again:")
                continue
        except:
            # Printing error message if exception occurs
            print("Please enter whole numbers only. Please try again:")
|
PratikAmatya/8-bit-adder-Python-Program
|
Program Files/NumberValidation.py
|
NumberValidation.py
|
py
| 1,028 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15444668840
|
from champ2.models import *
#======================================================
def matrice_value_set_incomplete_count(matrice_value_set):
    # check to see that counts match up
    metrics_count = MatriceMetric.objects.all().count()
    values_count = matrice_value_set.values.filter(value__isnull=False).count()
    return metrics_count - values_count
def matrice_value_set_invalid_count(matrice_value_set):
    count = 0
    # check to see that each value is between 1 and 4
    for value in matrice_value_set.values.all():
        if value.value is not None:
            if value.value > 4 or value.value < 1:
                count = count + 1
    return count
# returns number of fields left to complete for a given metric and value set
def matrice_program_area_incomplete_count(matrice_program_area, matrice_value_set):
    metrics_count = MatriceMetric.objects.filter(matrice_program_area=matrice_program_area).count()
    values_count = matrice_value_set.values.filter(matrice_metric__matrice_program_area=matrice_program_area).filter(value__isnull=False).count()
    return metrics_count - values_count
|
adamfk/myewb2
|
myewb/apps/champ2/helper.py
|
helper.py
|
py
| 1,167 |
python
|
en
|
code
| null |
github-code
|
6
|
27259885900
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""515. Find Largest Value in Each Tree Row
"""
from collections import deque
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    def largestValues(self, root):
        if not root:
            return []
        largest_values = []
        queue = deque()
        queue.append(root)
        queue.append(None)
        row_max = float('-inf')
        while queue:
            node = queue.popleft()
            if node:
                row_max = max(row_max, node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            else:
                largest_values.append(row_max)
                row_max = float('-inf')
                if queue:
                    queue.append(None)
        return largest_values
|
asperaa/back_to_grind
|
Trees/largestValues.py
|
largestValues.py
|
py
| 1,007 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38601541912
|
#!/usr/bin/python3
import argparse
import sys
import json
import dballe
__version__ = '@PACKAGE_VERSION@'
def main(inputfiles, out):
importer = dballe.Importer("BUFR")
out.write('{"type":"FeatureCollection", "features":[')
for f in inputfiles:
with importer.from_file(f) as fp:
is_first = True
for msgs in fp:
for msg in msgs:
for cur in msg.query_data():
lev = cur["level"]
tr = cur["trange"]
if not is_first:
out.write(",")
else:
is_first = False
var = cur["variable"]
json.dump({
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [cur.enqd("lon"), cur.enqd("lat")],
},
"properties": {
"lon": cur.enqi("lon"),
"lat": cur.enqi("lat"),
"datetime": cur["datetime"].strftime("%Y-%m-%dT%H:%M:%SZ"),
"network": cur["report"],
"ident": cur["ident"],
"level_t1": lev.ltype1 if lev is not None else None,
"level_v1": lev.l1 if lev is not None else None,
"level_t2": lev.ltype2 if lev is not None else None,
"level_v2": lev.l2 if lev is not None else None,
"trange_pind": tr.pind if tr is not None else None,
"trange_p1": tr.p1 if tr is not None else None,
"trange_p2": tr.p2 if tr is not None else None,
"bcode": var.code,
"value": var.get(),
}
}, out)
out.write("]}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert BUFR files to GeoJSON format")
parser.add_argument("inputfile", nargs="*", metavar="FILE", help="BUFR file")
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
if not args.inputfile:
inputfiles = [sys.stdin]
else:
inputfiles = args.inputfile
main(inputfiles, sys.stdout)
|
ARPA-SIMC/bufr2json
|
bufr2json.py
|
bufr2json.py
|
py
| 2,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21160846100
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 19:00:57 2018
@author: HP
"""
from numpy import asarray
from numpy import zeros
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.layers import Flatten, LSTM ,Dropout,GRU, Bidirectional
from keras.layers import Embedding, Maximum, Merge, Input, concatenate
from collections import defaultdict
from nltk.corpus import brown,stopwords
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPooling2D
import random
import nltk
#brown.categories()
#brown.words(categories='news')
#brown.words(fileids=['cg22'])
#brown.sents(categories=['news', 'editorial', 'reviews'])
batch_size=30
embedding_size=128
# Convolution
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
# GRU
gru_output_size = 70
#LSTM
lstm_output_size = 70
dataset = [] # 500 samples
for category in brown.categories():
for fileid in brown.fileids(category):
dataset.append((brown.words(fileids = fileid),category))
dataset = [([w.lower() for w in text],category) for text,category in dataset]
labels=[]
for sample in dataset:
labels.append(sample[1])
inputset=[]
for sample in dataset:
inputset.append(' '.join(sample[0]))
categ=brown.categories()
label_class=[]
for x in labels:
label_class.append(categ.index(x))
len_finder=[]
for dat in inputset:
len_finder.append(len(dat))
input_train=[]
j=0;
for zz in inputset:
j=j+1
if j % 4 != 0:
input_train.append(zz)
input_test=[]
j=0;
for zz in inputset:
j=j+1
if j % 4 == 0:
input_test.append(zz)
label_train=[]
j=0;
for zz in label_class:
j=j+1
if j % 4 != 0:
label_train.append(zz)
label_test=[]
j=0;
for zz in label_class:
j=j+1
if j % 4 == 0:
label_test.append(zz)
#one hot encoding
i=0
y=np.zeros((len(label_class),max(label_class)+1))
for x in label_class:
y[i][x]=1
i=i+1
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
y_train[i][x]=1
i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
y_test[i][x]=1
i=i+1
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(input_train)
#print(encoded_docs)
# pad documents to the maximum document length
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
#print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
rows,cols = padded_docs.shape
input_shape = Input(shape=(rows,cols))
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)(input_shape)
tower_1 = Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1)(e)
tower_1 = MaxPooling1D(pool_size=pool_size)(tower_1)
tower_2 = Conv1D(64,kernel_size,padding='valid',activation='relu',strides=1)(e)
tower_2 = MaxPooling1D(pool_size=pool_size)(tower_2)
merged = concatenate([tower_1, tower_2])
out = Dense(200, activation='relu')(merged)
out = Dense(15, activation='softmax')(out)
model = Model(input_shape, out)
# compile the model
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
# summarize the model
print(model.summary())
# fit the model
model.fit(padded_docs,y_train, epochs=3, verbose=0)
#Testing the model
tt = Tokenizer()
tt.fit_on_texts(input_test)
tvocab_size = len(tt.word_index) + 1
# integer encode the documents
tencoded_docs = tt.texts_to_sequences(input_test)
#print(encoded_docs)
# pad documents to the maximum document length
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# evaluate the model
loss, accuracy = model.evaluate(tpadded_docs, y_test, verbose=0)
print('Accuracy: %f' % (accuracy*100))
|
mharish2797/DNN-Text-Classifiers
|
Simple DNN classifiers/Brown Corpus based basic DNN Classifiers/Brown_classifier with parallel network.py
|
Brown_classifier with parallel network.py
|
py
| 4,703 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18601147125
|
from . import utils
from . import s_1040
data = utils.parse_values()
###################################
def build_data():
form_1040 = s_1040.build_data()
data_dict = {
'ssn' : data['ssn'],
'first_and_initial' : data['name_first'] + ' ' + data['name_middle_i'],
'last' : data['name_last'],
'address' : data['address'],
'city_state_zip' : (data['address_city']
+ ', ' + data['address_state']
+ ' ' + data['address_zip']),
}
if 'apartment' in data:
data_dict['apartment'] = data['apartment']
utils.add_keyed_float(form_1040['_owed'],
'pay',
data_dict)
return data_dict
def fill_in_form():
data_dict = build_data()
basename = 'f1040v.pdf'
utils.write_fillable_pdf(basename, data_dict, 'f1040v.keys')
if __name__ == '__main__':
fill_in_form()
|
pyTaxPrep/taxes-2018
|
forms/s_1040v.py
|
s_1040v.py
|
py
| 1,004 |
python
|
en
|
code
| 31 |
github-code
|
6
|
70488535867
|
# accepted on codewars.com
import math
# max recursion depth exceeded for the max test value = 9000000000000000000
def count_digit_five(max_num: int) -> int:
memo_table = [-1] * len(str(max_num))
def recursive_seeker(n: int) -> int:
if n < 10:
if n < 5:
return 0
else:
return 1
str_num = math.floor(math.log10(n)) + 1
number = str(n)[0]
if n == int(math.pow(10, str_num - 1)) - 1 and memo_table[str_num - 1] != -1:
return memo_table[str_num - 1]
count_all = 0
# first phase
count_all += recursive_seeker(n - int(number) * int(math.pow(10, str_num - 1)))
# second phase
count_all += int(number) * recursive_seeker(int(math.pow(10, str_num - 1) - 1))
# third phase
if int(number) >= 6:
count_all += int(math.pow(10, str_num - 1))
elif int(number) == 5:
count_all += n - 5 * int(math.pow(10, str_num - 1)) + 1
if n == int(math.pow(10, str_num - 1)) - 1 and memo_table[str_num - 1] == -1:
memo_table[str_num - 1] = count_all
return count_all
return recursive_seeker(max_num)
# the accepted one:
def dont_give_me_five(start, end):
# count the numbers from start to end that don't contain the digit 5
counter = 0
# manipulation with diff ends of interval given and method below
if start == 0 and end == 0:
return 0
elif end == 0:
return cycle_count_digit_without_five(abs(start)) + 1
elif start == 0:
return cycle_count_digit_without_five(abs(end)) + 1
elif start * end < 0:
counter += cycle_count_digit_without_five(abs(start)) + cycle_count_digit_without_five(abs(end)) + 1
else:
counter += abs(cycle_count_digit_without_five(abs(start)) - cycle_count_digit_without_five(abs(end))) + (1 if '5' not in str(min(abs(start), abs(end))) else 0)
return counter
# a fast solution
def cycle_count_digit_without_five(max_num):
# a string representation of a max number given
str_max_num = str(max_num)
new_num = '' # a string for a new num
for i in range(0, len(str_max_num)): # some operations on number given: 19645779 -> 1854 5 999 -> 1854 4 888 -> 18545999 -> 1*9^4 + 4*9^3 + 8*9^2 + 8*9^1 + 8*9^0 = number
if str_max_num[i] == '5':
new_num += '4'
new_num += '8' * (len(str_max_num) - i - 1)
break
new_num += (str(int(str_max_num[i]) - 1) if int(str_max_num[i]) > 5 else str_max_num[i])
result = 0
# translating to a system with base 10 from 9:
for i in range(0, len(new_num)):
result += int(new_num[i]) * 9 ** (len(new_num) - i - 1)
return result
#
# print(int('1001', 9))
# print(count_digit_five(64))
# print(count_digit_five(65))
# print(count_digit_five(66))
# print(count_digit_five(99))
# print(count_digit_five(9000000000000000000))
# cycle_count_digit_without_five(19989856661)
# cycle_count_digit_without_five(144366)
# print(cycle_count_digit_without_five(90))
# print(cycle_count_digit_without_five(1001))
print(dont_give_me_five(984, 4304)) # 2449
print(dont_give_me_five(51, 60)) # 1
print(dont_give_me_five(-4436, -1429)) # 2194
print(dont_give_me_five(-2490228783604515625, -2490228782196537011)) # 520812180
print(dont_give_me_five(-9000000000000000000, 9000000000000000000)) # 2401514164751985937
print(dont_give_me_five(40076, 2151514229639903569)) # 326131553237897713
print(dont_give_me_five(-206981731, 2235756979031654521)) # 340132150309630357
print(dont_give_me_five(0, 1))
print(dont_give_me_five(5, 15))
print(dont_give_me_five(-5, 4))
|
LocusLontrime/Python
|
CodeWars_Rush/_4kyu/Dont_give_me_five_Really_4kyu.py
|
Dont_give_me_five_Really_4kyu.py
|
py
| 3,693 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17668930312
|
#!/usr/bin/env python3
# Compare event boundary timing in HMMs from cortical Yeo ROIs
# to timing in hand(RA)-labeled events
import os
import tqdm
import brainiak.eventseg.event
from scipy.fftpack import fft,ifft
from scipy.stats import zscore, norm, pearsonr
from HMM_settings import *
from event_comp import ev_conv, Pro_ev_conv, child_ev_conv
ev_conv = child_ev_conv
ev_conv_perm = ev_conv[1:]
task='DM'
nTR=750
nbins = len(bins)
nROI = len(ROIl)
xcorrx = np.concatenate([np.arange(-nTR+2,0)*TR,np.arange(nTR-1)*TR])
savefile = HMMpath+'HMM_vs_hand_child_'
dE_k = {key:{key:[] for key in bins} for key in ROIl}
dE_k_corr = np.zeros((nROI,nbins))
bin_corr = np.zeros(nROI)
#dE_k_p = np.zeros((nPerm+1,nROI,nbins))
event_bounds = {key:{key:[] for key in bins} for key in ROIl}
matchz_mat = np.zeros((nROI,nbins))
for seed in tqdm.tqdm(seeds):
for r,roi_short in tqdm.tqdm(enumerate(ROIl)):
roi=HMMsavedir+seed+'/'+roi_short+'.h5'
k = dd.io.load(roi,'/best_k')
D = [dd.io.load(roidir+seed+'/'+roi_short+'.h5','/'+task+'/bin_'+str(b)+'/D') for b in bins]
hmm = brainiak.eventseg.event.EventSegment(n_events=k)
hmm.fit([np.mean(d,axis=0).T for d in D])
for bi,b in enumerate(bins):
dE_k[roi_short][b] = np.diff(np.dot(hmm.segments_[bi], np.arange(k)+1))
dE_k_corr[r,bi],_ = pearsonr(dE_k[roi_short][b],ev_conv_perm)
bin_corr[r],_ = pearsonr(dE_k[roi_short][0],dE_k[roi_short][4])
dd.io.save(savefile+'_'+seed+'.h5',{'dE_k_corr':dE_k_corr, 'dE_k':dE_k, 'bin_corr':bin_corr})
|
samsydco/HBN
|
HMM_vs_hand.py
|
HMM_vs_hand.py
|
py
| 1,502 |
python
|
en
|
code
| 2 |
github-code
|
6
|
15983166378
|
# Mjolnir
from ...infrastrcutures.dynamo.infrastructure import DynamodbInfrastructure
# Third party
from boto3.dynamodb.conditions import Key
from decouple import config
class DynamodbRepository:
    infra = DynamodbInfrastructure
    @classmethod
    async def get_items(cls, key: str, value: str) -> list:
        async with cls.infra.get_dynamodb_resource() as dynamodb_resource:
            table = await dynamodb_resource.Table(config('AWS_TABLE_NAME'))
            result = await table.query(
                KeyConditionExpression=Key(key).eq(value)
            )
            return result['Items']
    @classmethod
    async def put_items(cls, item: dict):
        async with cls.infra.get_dynamodb_resource() as dynamodb_resource:
            table = await dynamodb_resource.Table(config('AWS_TABLE_NAME'))
            await table.put_item(
                Item=item
            )
|
vinireeis/Mjolnir
|
src/repositories/dynamodb/repository.py
|
repository.py
|
py
| 895 |
python
|
en
|
code
| 0 |
github-code
|
6
|
342809739
|
from collections import OrderedDict
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
df = pd.read_csv('pd_url_list_short.csv') # read the csv file into the df variable
# The crawling range, previously entered by hand, is now given as start and end (passed in when the class is created).
class GetText(object):
def __init__(self, ulist, start, end): # later, the df defined above is passed in as ulist
self.ulist = ulist
self.start = start
self.end = end
def wine_info(self): # wine_dict is a dictionary whose keys are id, name, production, etc.
wine_dict = OrderedDict() # each key holds a list as its value
wine_dict['id'] = []
wine_dict['name'] = []
wine_dict['production1'] = []
wine_dict['production2'] = []
wine_dict['production3'] = []
wine_dict['production4'] = []
wine_dict['type'] = []
wine_dict['alc'] = []
wine_dict['producer'] = []
wine_dict['varieties'] = []
wine_dict['bestfor'] = []
wine_dict['sweetness'] = []
wine_dict['body'] = []
wine_dict['tastingnote'] = []
for i in range(self.start, self.end): # set the range to crawl (index numbers, not wine_code)
url = self.ulist.iloc[i]['URL'] # self.ulist is a dataframe, so use iloc to get the url
res = requests.get(url)
soup = BeautifulSoup(res.content)
idnum = re.search(r'\d{5}', url).group() # start crawling from the wine_code
wine_dict['id'].append(idnum)
try:
li0 = soup.find('li', attrs = {'class' : 'WineEndName'}) # needs exception handling
wine_name = li0.get_text()
wine_dict['name'].append(wine_name)
except:
wine_dict['name'].append('None')
try:
li1 = soup.find('li', attrs = {'class' : 'WineProduction'})
a = li1.find_all('a')
for i in range(4):
if i <= len(a) -1 :
wine_dict['production{}'.format(i+1)].append(a[i].get_text())
else :
wine_dict['production{}'.format(i+1)].append('None')
except:
wine_dict['production1'].append('None')
wine_dict['production2'].append('None')
wine_dict['production3'].append('None')
wine_dict['production4'].append('None')
try:
li1_1 = soup.find('li', attrs = {'class' : 'WineInfo'})
words = li1_1.get_text().strip()
wine_dict['type'].append(re.search(r'^\w+', words).group())
except:
wine_dict['type'].append('None')
try:
li = soup.find('li', attrs = {'class' : 'WineInfo'})
aic = re.search(r'AIC[.\d]+', li.get_text().strip())
if not aic :
wine_dict['alc'].append('None')
else :
wine_dict['alc'].append(aic.group())
except:
wine_dict['alc'].append('None')
try:
li2 = soup.find('li', attrs = {'class' : 'Winery'})
producer = li2.a.get_text()
reproducer = re.sub(r'\s', ' ', producer)
wine_dict['producer'].append(reproducer)
except:
wine_dict['producer'].append('None')
try:
li3 = soup.find('li', attrs = {'class' : 'Varieties'})
varieties = ''
for var in li3.find_all('a') :
varieties += var.get_text()
wine_dict['varieties'].append(varieties)
except:
wine_dict['varieties'].append('None')
try:
li4 = soup.find('li', attrs = {'class' : 'BestFor'})
bestfor = li4.get_text()
wine_dict['bestfor'].append(bestfor.strip())
except:
wine_dict['bestfor'].append('None')
try :
li6 = soup.find('li', attrs = {'class' : 'Sweetness'})
px = li6.find_all('img')[1]['style']
wine_dict['sweetness'].append(re.search(r'\d+', px).group())
except :
wine_dict['sweetness'].append('None')
try :
li7 = soup.find('li', attrs = {'class' : 'Body'})
px = li7.find_all('img')[1]['style']
wine_dict['body'].append(re.search(r'\d+', px).group())
except :
wine_dict['body'].append('None')
try:
ul = soup.find('ul', attrs = {'class' : 'TastingnoteList'})
note = ul.get_text().strip()
subnote = re.sub(r'\s', ' ', note) # use a regex to replace \s (whitespace) with a plain space
wine_dict['tastingnote'].append(subnote) # (some \s characters are not plain spaces, and those seemed
except: # to cause problems when saving)
wine_dict['tastingnote'].append('None')
wine_df = pd.DataFrame(wine_dict) # convert the dict wine_dict into the dataframe wine_df
return wine_df
# Saving to Excel is the problem, so the code tries to save 500 rows at a time and, on an error, moves on to the next 500.
# This code scrapes rows 0 through 4000.
i=0
while i<4000:
wine2 = GetText(df,i,i+500) # start and end change on every loop iteration
result = wine2.wine_info()
try:
writer = pd.ExcelWriter('./wine{}_{}.xlsx'.format(i,i+500), engine=None) # the file name also changes automatically
result.to_excel(writer, sheet_name='1', encoding ='utf-8') # save the result to Excel
writer.save()
i += 500 # after crawling and saving 500 rows, i increases by 500
except:
i += 500 # if an error occurs, i immediately increases by 500 and crawling continues with the next 500
continue
|
nosangho/team_project
|
[02-15][junyang] wine21_save_loop.py
|
[02-15][junyang] wine21_save_loop.py
|
py
| 6,458 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
7759575517
|
import serial
class SerialParameters:
def __init__(self, port=None, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE, timeout=None, xonxoff=False, rtscts=False,
write_timeout=None, dsrdtr=False, inter_byte_timeout=None, exclusive=None,
local_echo=False, appendCR=False, appendLF=False):
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.write_timeout = write_timeout
self.dsrdtr = dsrdtr
self.inter_byte_timeout = inter_byte_timeout
self.exclusive = exclusive
self.readTextIndex = "read_line"
self.readBytes = 1
self.readUntil = ''
self.DTR = False
self.maxSignalRate = 10 # Hz
self.Kennbin = ""
self.local_echo = local_echo
self.appendCR = appendCR
self.appendLF = appendLF
|
timhenning1997/Serial-Port-Monitor
|
SerialParameters.py
|
SerialParameters.py
|
py
| 1,086 |
python
|
en
|
code
| 2 |
github-code
|
6
|
10914883697
|
import sys
import pickle as pkl
import sciunit_tree
import ExecutionTree as exT
from algorithms import pc
def replay_sequence(tree_binary, cache_size, replay_order_binary):
    sciunit_execution_tree, _ = sciunit_tree.tree_load(tree_binary)
    tree = exT.create_tree('SCIUNIT', tree_binary)
    tree.cache_size = cache_size
    pc(tree)
    with open(replay_order_binary, 'wb') as robf:
        pkl.dump((sciunit_execution_tree, tree), robf)
if __name__ == '__main__':
    if len(sys.argv) < 5:
        print('Usage replay-order.py pc|prpv1|prpv2|lfu <cache_size> <input tree.bin> <output replay-order.bin>')
        sys.exit(1)  # exit early so the argument accesses below do not fail
    if sys.argv[1] in ['pc']:
        replay_sequence(sys.argv[3], float(sys.argv[2]), sys.argv[4])
    elif sys.argv[1] in ['prpv1', 'prpv2', 'lfu']:
        raise NotImplementedError(f'{sys.argv[1]} Replay Order Not Implemented')
    else:
        print('Usage replay-order.py pc|prpv1|prpv2|lfu <cache_size> <input tree.bin> <output replay-order.bin>')
|
depaul-dice/CHEX
|
src/replay/replay-order.py
|
replay-order.py
|
py
| 972 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18572100394
|
# Conditionals
# x=input("请输入数字:") # get user input as a string
# x=int(x) # convert the string form into a number
# if x>200:
#     print("大于 200")
# elif x>100:
#     print("大于 100, 小于等于 200")
# else:
#     print("小于等于 100")
# Basic arithmetic operations
n1=int(input("请输入数字1:"))
n2=int(input("请输入数字2:"))
op=input("请输入运算: + , - , * , / =")
if op=="+":
    print(f"{n1}加{n2}等于{n1+n2}") # changed later
    # print(n1+n2) # original
elif op=="-":
    print(f"{n1}减{n2}等于{n1-n2}") # changed later
    # print(n1-n2) # original
elif op=="*":
    print(f"{n1}乘{n2}等于{n1*n2}") # changed later
    # print(n1*n2) # original
elif op=="/":
    print(f"{n1}除{n2}等于{n1/n2}") # changed later
    # print(n1/n2) # original
else:
    print("不支援的运算 cao ni ma")
|
yeste-rge/-Python-By-
|
彭彭/#06 流程控制:if 判斷式/condition.py
|
condition.py
|
py
| 805 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
37552127134
|
from utils.utils import getLinesOfFile
def getPriority(char: str):
    asciiVal = ord(char[0])
    if asciiVal >= 97 and asciiVal <= 122:
        # lowercase letter
        return asciiVal - 96
    else:
        # uppercase letter
        return asciiVal - 65 + 27
def findLetterInBothString(s1, s2):
    for char in s1:
        if char in s2:
            return char
    return "ERROR"
def findLetterInAllString(s1, s2, s3):
    for char in s1:
        if char in s2 and char in s3:
            return char
    return "ERROR"
class Rucksack:
    def __init__(self, row: str):
        self.rucksack = row
        self.firstCompartment = row[:len(row)//2]
        self.secondCompartment = row[len(row)//2:]
if __name__ == '__main__':
    rucksacks = [Rucksack(elem) for elem in getLinesOfFile('input.txt')]
    priorities = [getPriority(findLetterInBothString(elem.firstCompartment, elem.secondCompartment)) for elem in rucksacks]
    print(f"sum of priorities is {sum(priorities)}")
    groups = [getLinesOfFile('input.txt')[n:n+3] for n in range(0, len(rucksacks), 3)]
    priorities2 = [getPriority(findLetterInAllString(*group)) for group in groups]
    print(f"sum of priorities of badges is {sum(priorities2)}")
|
liuker97/adventOfCode2022
|
src/day3/day3.py
|
day3.py
|
py
| 1,216 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19243874206
|
import sys
from pathlib import Path
import environ
PROJECT_DIR = Path(__file__).resolve().parent
ROOT_DIR = PROJECT_DIR.parent
# Environment
ENV_FILE = "/etc/purldb/.env"
if not Path(ENV_FILE).exists():
ENV_FILE = ROOT_DIR / ".env"
env = environ.Env()
environ.Env.read_env(str(ENV_FILE))
# Security
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[".localhost", "127.0.0.1", "[::1]"])
# SECURITY WARNING: do not run with debug turned on in production
DEBUG = env.bool("PURLDB_DEBUG", default=False)
PURLDB_REQUIRE_AUTHENTICATION = env.bool(
"PURLDB_REQUIRE_AUTHENTICATION", default=False
)
# SECURITY WARNING: do not run with debug turned on in production
DEBUG_TOOLBAR = env.bool("PURLDB_DEBUG_TOOLBAR", default=False)
PURLDB_PASSWORD_MIN_LENGTH = env.int("PURLDB_PASSWORD_MIN_LENGTH", default=14)
# SCANCODE.IO
SCANCODEIO_URL = env.str("SCANCODEIO_URL", "")
SCANCODEIO_API_KEY = env.str("SCANCODEIO_API_KEY", "")
# PurlDB
PURLDB_LOG_LEVEL = env.str("PURLDB_LOG_LEVEL", "INFO")
# Application definition
INSTALLED_APPS = (
# Local apps
# Must come before Third-party apps for proper templates override
'clearcode',
'clearindex',
'minecode',
'matchcode',
'packagedb',
# Django built-in
"django.contrib.auth",
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
"django.contrib.humanize",
# Third-party apps
'django_filters',
'rest_framework',
'rest_framework.authtoken',
)
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'purldb_project.urls'
WSGI_APPLICATION = "purldb_project.wsgi.application"
SECURE_PROXY_SSL_HEADER = env.tuple(
"SECURE_PROXY_SSL_HEADER", default=("HTTP_X_FORWARDED_PROTO", "https")
)
# API
DATA_UPLOAD_MAX_NUMBER_FIELDS = env.int(
"DATA_UPLOAD_MAX_NUMBER_FIELDS", default=2048
)
# Database
DATABASES = {
'default': {
'ENGINE': env.str('PACKAGEDB_DB_ENGINE', 'django.db.backends.postgresql'),
'HOST': env.str('PACKAGEDB_DB_HOST', 'localhost'),
'NAME': env.str('PACKAGEDB_DB_NAME', 'packagedb'),
'USER': env.str('PACKAGEDB_DB_USER', 'packagedb'),
'PASSWORD': env.str('PACKAGEDB_DB_PASSWORD', 'packagedb'),
'PORT': env.str('PACKAGEDB_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [str(PROJECT_DIR.joinpath("templates"))],
"APP_DIRS": True,
'OPTIONS': {
"debug": DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
"django.template.context_processors.static",
],
},
},
]
# Login
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
# Passwords
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {
"min_length": PURLDB_PASSWORD_MIN_LENGTH,
},
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Testing
# True if running tests through `./manage test or pytest`
IS_TESTS = any(clue in arg for arg in sys.argv for clue in ("test", "pytest"))
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
"LOCATION": "default",
}
}
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "{levelname} {message}",
"style": "{",
},
},
"handlers": {
"null": {
"class": "logging.NullHandler",
},
"console": {
"class": "logging.StreamHandler",
"formatter": "simple",
},
},
"loggers": {
"scanpipe": {
"handlers": ["null"] if IS_TESTS else ["console"],
"level": PURLDB_LOG_LEVEL,
"propagate": False,
},
"django": {
"handlers": ["null"] if IS_TESTS else ["console"],
"propagate": False,
},
# Set PURLDB_LOG_LEVEL=DEBUG to display all SQL queries in the console.
"django.db.backends": {
"level": PURLDB_LOG_LEVEL,
},
},
}
# Internationalization
LANGUAGE_CODE = "en-us"
TIME_ZONE = env.str("TIME_ZONE", default="UTC")
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = '/var/purldb/static/'
STATICFILES_DIRS = [
PROJECT_DIR / 'static',
]
# Third-party apps
# Django restframework
REST_FRAMEWORK_DEFAULT_THROTTLE_RATES = {'anon': '3600/hour', 'user': '10800/hour'}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication',),
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework.renderers.AdminRenderer',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.SearchFilter',
),
'DEFAULT_THROTTLE_CLASSES': [
'packagedb.throttling.StaffUserRateThrottle',
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle',
],
'DEFAULT_THROTTLE_RATES': REST_FRAMEWORK_DEFAULT_THROTTLE_RATES,
'EXCEPTION_HANDLER': 'packagedb.throttling.throttled_exception_handler',
'DEFAULT_PAGINATION_CLASS': 'packagedb.api_custom.PageSizePagination',
# Limit the load on the Database returning a small number of records by default. https://github.com/nexB/vulnerablecode/issues/819
"PAGE_SIZE": 20,
}
if not PURLDB_REQUIRE_AUTHENTICATION:
REST_FRAMEWORK["DEFAULT_PERMISSION_CLASSES"] = (
"rest_framework.permissions.AllowAny",
)
if DEBUG_TOOLBAR:
INSTALLED_APPS += ("debug_toolbar",)
MIDDLEWARE += ("debug_toolbar.middleware.DebugToolbarMiddleware",)
DEBUG_TOOLBAR_PANELS = (
"debug_toolbar.panels.history.HistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
)
INTERNAL_IPS = [
"127.0.0.1",
]
# Active seeders: each active seeder class needs to be added explicitly here
ACTIVE_SEEDERS = [
'minecode.visitors.maven.MavenSeed',
]
|
nexB/purldb
|
purldb_project/settings.py
|
settings.py
|
py
| 7,976 |
python
|
en
|
code
| 23 |
github-code
|
6
|
31812175593
|
def sqdlist(l):
    i = 0
    while i < len(l):
        l[i] = (l[i])**2
        i = i + 1
    return l
#trace of a matrix
def trace(m):
    if len(m) != len(m[0]):
        print('matrix is non-square')
    else:
        i = 0
        a = 0
        while i < len(m[0]):
            a = a + m[i][i]
            i = i + 1
        return a
#create identity matrix
def id(k):
    i = 0
    id = []
    while i < k:
        j = 0
        row = []
        while j < k:
            if j == i:
                row = row + [1]
            else:
                row = row + [0]
            j = j + 1
        id = id + [row]
        i = i + 1
    return id
|
cnc99/Collab
|
sqdlist.py
|
sqdlist.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30898444460
|
#READING AND ORDERING
fin=open("cowjump.in", "r")
lines=fin.readlines()
n=int(lines[0])
lines.remove(lines[0])
#Sweep
points=[]
for i in range(n):
lines[i]=lines[i].split()
lines[i]=[int(x) for x in lines[i]]
beg=lines[i][0]>lines[i][2]
points.append([lines[i][0], lines[i][1], beg, i])
points.append([lines[i][2], lines[i][3], not beg, i])
def my_key_function(entry):
return(entry[0], entry[2])
points=sorted(points, key=my_key_function)
def box_inter(a, b):
return min(a[0], a[2])<=max(b[0], b[2]) and max(a[0], a[2])>=min(b[0], b[2]) and min(a[1], a[3])<=max(b[1], b[3]) and max(a[1], a[3])>=min(b[1], b[3])
active=[]
count=0
inters=[]
final=0
i=-1
for i in range(len(points)):
if final!=0:
break
if not points[i][2]: #Left point
#active=sorted(active, key=lambda entry: entry[1])
found=False
for j in range(len(active)):
x1=lines[points[i][3]][0]
y1=lines[points[i][3]][1]
x2=lines[points[i][3]][2]
y2=lines[points[i][3]][3]
x3=active[j][0]
y3=active[j][1]
x4=active[j][2]
y4=active[j][3]
if x2!=x1:
m1=(y2-y1)/(x2-x1)
else:
m1=n
if x4!=x3:
m2=(y4-y3)/(x4-x3)
else:
m2=n
if m2!=m1:
x=(m1*x1-y1-m2*x3+y3)/(m1-m2)
if (x1<x<x2 or x2<x<x1) and (x3<x<x4 or x4<x<x3):
index1=i
index2=j
j=len(active)-1
if index1+1 in inters:
final=points[index1][3]+1
elif index2+1 in inters:
final=points[index2][3]+1
else:
inters.append(index1+1)
inters.append(index2+1)
found=True
active.append(lines[points[i][3]])
else: #Right point
active.remove(lines[points[i][3]])
"""
def box_inter(a, b):
return min(a[0], a[2])<=max(b[0], b[2]) and max(a[0], a[2])>=min(b[0], b[2]) and min(a[1], a[3])<=max(b[1], b[3]) and max(a[1], a[3])>=min(b[1], b[3])
found=False
index1=0
index2=0
for i in range(n-1):
for j in range(i+1, n):
if box_inter(lines[i], lines[j]):
x1=lines[i][0]
y1=lines[i][1]
x2=lines[i][2]
y2=lines[i][3]
x3=lines[j][0]
y3=lines[j][1]
x4=lines[j][2]
y4=lines[j][3]
if x2!=x1:
m1=(y2-y1)/(x2-x1)
else:
m1=n
if x4!=x3:
m2=(y4-y3)/(x4-x3)
else:
m2=n
if m2!=m1:
x=(m1*x1-y1-m2*x3+y3)/(m1-m2)
if x1<x<x2 and x3<x<x4 and not found:
index1=i
index2=j
i=n-2
j=n-1
found=True
index1s=0
found=False
for j in range(n):
if box_inter(lines[index1], lines[j]):
x1=lines[index1][0]
y1=lines[index1][1]
x2=lines[index1][2]
y2=lines[index1][3]
x3=lines[j][0]
y3=lines[j][1]
x4=lines[j][2]
y4=lines[j][3]
if x2!=x1:
m1=(y2-y1)/(x2-x1)
else:
m1=n
if x4!=x3:
m2=(y4-y3)/(x4-x3)
else:
m2=n
if m2!=m1:
x=(m1*x1-y1-m2*x3+y3)/(m1-m2)
if x1<x<x2 and x3<x<x4 and not found:
index1s+=1
if index1s==2:
found=True
a=""
if found:
a=str(index1+1)+"\n"
else:
a=str(index2+1)+"\n"
"""
a=str(final)+"\n"
#print(a)
fout=open("cowjump.out", "w")
fout.write(a)
fout.close()
|
nomichka/2019-03-31-USACO-Silver
|
cowjump.py
|
cowjump.py
|
py
| 3,850 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28395932084
|
import torch
from torch import optim
from torch import nn
from torch.utils import data
from data import AnimeDataset, LossWriter
from model import Generator, Discriminator
DATA_DIR = "../datasets/selfie2anime/all"
MODEL_G_PATH = "./Net_G.pth"
MODEL_D_PATH = "./Net_D.pth"
LOG_G_PATH = "./Log_G.txt"
LOG_D_PATH = "./Log_D.txt"
IMAGE_SIZE = 64
BATCH_SIZE = 128
WORKER = 1
LR = 0.0002
NZ = 100
num_epochs = 300
dataset = AnimeDataset(dataset_path=DATA_DIR, image_size=IMAGE_SIZE)
data_loader = data.DataLoader(dataset, batch_size=BATCH_SIZE,
shuffle=True, num_workers=WORKER)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
netG = Generator().to(device)
netD = Discriminator().to(device)
criterion = nn.BCELoss()
real_label = 1.
fake_label = 0.
optimizerD = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.999))
g_writer = LossWriter(save_path=LOG_G_PATH)
d_writer = LossWriter(save_path=LOG_D_PATH)
img_list = []
G_losses = []
D_losses = []
iters = 0
print(dataset.__len__())
print("开始训练")
for epoch in range(num_epochs):
for data in data_loader:
#################################################
# 1. 更新判别器D: 最大化 log(D(x)) + log(1 - D(G(z)))
# 等同于最小化 - log(D(x)) - log(1 - D(G(z)))
#################################################
netD.zero_grad()
# 1.1 来自数据集的样本
real_imgs = data.to(device)
b_size = real_imgs.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# 使用鉴别器对数据集样本做判断
output = netD(real_imgs).view(-1)
# 计算交叉熵损失 -log(D(x))
errD_real = criterion(output, label)
# 对判别器进行梯度回传
errD_real.backward()
D_x = output.mean().item()
# 1.2 生成随机向量
noise = torch.randn(b_size, NZ, device=device)
# 来自生成器生成的样本
fake = netG(noise)
label.fill_(fake_label)
# 使用鉴别器对生成器生成样本做判断
output = netD(fake.detach()).view(-1)
# 计算交叉熵损失 -log(1 - D(G(z)))
errD_fake = criterion(output, label)
# 对判别器进行梯度回传
errD_fake.backward()
D_G_z1 = output.mean().item()
# 对判别器计算总梯度,-log(D(x))-log(1 - D(G(z)))
errD = errD_real + errD_fake
# 更新判别器
optimizerD.step()
#################################################
# 2. 更新判别器G: 最小化 log(D(x)) + log(1 - D(G(z))),
# 等同于最小化log(1 - D(G(z))),即最小化-log(D(G(z)))
# 也就等同于最小化-(log(D(G(z)))*1+log(1-D(G(z)))*0)
# 令生成器样本标签值为1,上式就满足了交叉熵的定义
#################################################
netG.zero_grad()
# 对于生成器训练,令生成器生成的样本为真,
label.fill_(real_label)
# 输入生成器的生成的假样本
output = netD(fake).view(-1)
# 对生成器计算损失
errG = criterion(output, label)
# 对生成器进行梯度回传
errG.backward()
D_G_z2 = output.mean().item()
# 更新生成器
optimizerG.step()
# 输出损失状态
if iters % 5 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, num_epochs, iters, len(data_loader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
d_writer.add(loss=errD.item(), i=iters)
g_writer.add(loss=errG.item(), i=iters)
# 保存损失记录
G_losses.append(errG.item())
D_losses.append(errD.item())
iters += 1
|
cwpeng-cn/DCGAN
|
train.py
|
train.py
|
py
| 3,971 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39920785113
|
from flask import Flask, render_template, request, redirect, jsonify, after_this_request
from flask_cors import CORS
from app.trajectory import *
from app.ion import get_ion
from app.esp import *
esp_addr = ''
data = {}
app = Flask(__name__,
static_url_path='',
static_folder='static',
template_folder="templates")
CORS(app)
@app.route('/esp')
def esp():
return esp_simulation()
@app.route('/api/esp')
def api_esp():
global data
data = esp_parse(esp_addr)
return jsonify(data)
@app.route('/time')
def time():
global data
data = esp_parse(esp_addr)
return render_template('time.html', time=data['time'])
@app.route('/api/tracking/<int:norad>')
def api_tracking_prn(norad):
res = jsonify(get_trajectory(norad))
res.headers.add("Access-Control-Allow-Origin", "*")
return res
@app.route('/tracking/<int:norad>')
def tracking_norad(norad):
return render_template('tracking.html', norad=norad)
@app.route('/tracking')
def tracking():
global data
if (not data):
data = esp_parse(esp_addr)
prn_norad = get_norad(data)
print (prn_norad)
return render_template('tracking_menu.html', prn_norad=prn_norad)
@app.route('/api/ion/<int:norad>')
def api_ion_prn(norad):
res = jsonify(get_ion(norad))
res.headers.add("Access-Control-Allow-Origin", "*")
return res
@app.route('/ion/<int:norad>')
def ion_norad(norad):
return render_template('ion.html', norad=norad)
@app.route('/ion')
def ion():
global data
if (not data):
data = esp_parse(esp_addr)
prn_norad = get_norad(data)
print (prn_norad)
return render_template('ion_menu.html', prn_norad=prn_norad)
@app.route('/settings', methods = ['POST', 'GET'])
def settings():
global esp_addr
if request.method == 'POST':
esp_addr = request.form['ip']
return redirect('/')
else:
return render_template('settings.html')
@app.route('/')
def home():
global esp_addr
if (esp_addr == ''):
return redirect('/settings')
return render_template('index.html')
|
Eugen171/gps
|
app/__init__.py
|
__init__.py
|
py
| 1,952 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39911784422
|
import argparse
import time
import warnings
import pickle
import torch
import random
import numpy as np
import pandas as pd
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments, ElectraForSequenceClassification, AdamW
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_set import *
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def get_pickle(pickle_path):
'''Function for loading the custom dataset'''
f = open(pickle_path, "rb")
dataset = pickle.load(f)
f.close()
return dataset
def get_data():
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
ai_hub = get_pickle("../../data/ai_hub_dataset.pkl")
train_token, train_label = tokenized_dataset(ai_hub["train"], tokenizer)
val_token, val_label = tokenized_dataset(ai_hub["validation"], tokenizer)
train_set = RE_Dataset(train_token, train_label)
val_set = RE_Dataset(val_token, val_label)
train_iter = DataLoader(train_set, batch_size=16, shuffle=True)
val_iter = DataLoader(val_set, batch_size=16, shuffle=True)
return train_iter, val_iter
def get_model():
network = AutoModelForSequenceClassification.from_pretrained("xlm-roberta-large", num_labels=6, hidden_dropout_prob=0.0).to("cuda:0")
optimizer = AdamW(network.parameters(), lr=5e-6)
scaler = GradScaler()
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=10, eta_min=1e-6)
criterion = nn.CrossEntropyLoss().to("cuda:0")
return network, optimizer, scaler, scheduler, criterion
def training_per_step(model, loss_fn, optimizer, scaler, input_ids, attention_mask, labels, device):
'''Function that trains the model at every step'''
model.train()
with autocast():
labels = labels.to(device)
preds = model(input_ids.to(device), attention_mask = attention_mask.to(device))[0]
loss = loss_fn(preds, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
return loss
def validating_per_steps(epoch, model, loss_fn, test_loader, device):
'''Function that runs validation every fixed number of steps'''
model.eval()
loss_sum = 0
sample_num = 0
preds_all = []
targets_all = []
pbar = tqdm(test_loader, total=len(test_loader), position=0, leave=True)
for input_ids, attention_mask, labels in pbar :
labels = labels.to(device)
preds = model(input_ids.to(device), attention_mask = attention_mask.to(device))[0]
preds_all += [torch.argmax(preds, 1).detach().cpu().numpy()]
targets_all += [labels.detach().cpu().numpy()]
loss = loss_fn(preds, labels)
loss_sum += loss.item()*labels.shape[0]
sample_num += labels.shape[0]
description = f"epoch {epoch + 1} loss: {loss_sum/sample_num:.4f}"
pbar.set_description(description)
preds_all = np.concatenate(preds_all)
targets_all = np.concatenate(targets_all)
accuracy = (preds_all == targets_all).mean()
print(" test accuracy = {:.4f}".format(accuracy))
return accuracy
def train(model, loss_fn, optimizer, scaler, train_loader, test_loader, scheduler, device):
'''Function that runs training and validation'''
prev_acc = 0
global_steps = 0
for epoch in range(1):
running_loss = 0
sample_num = 0
preds_all = []
targets_all = []
pbar = tqdm(enumerate(train_loader), total=len(train_loader), position=0, leave=True)
for step, (input_ids, attention_mask, labels) in pbar:
# training phase
loss = training_per_step(model, loss_fn, optimizer, scaler, input_ids, attention_mask, labels, device)
running_loss += loss.item()*labels.shape[0]
sample_num += labels.shape[0]
global_steps += 1
description = f"{epoch+1}epoch {global_steps: >4d}step | loss: {running_loss/sample_num: .4f} "
pbar.set_description(description)
# validating phase
if global_steps % 500 == 0 :
with torch.no_grad():
acc = validating_per_steps(epoch, model, loss_fn, test_loader, device)
if acc > prev_acc:
torch.save(model, "../../output/question_model.pt")
prev_acc = acc
if scheduler is not None :
scheduler.step()
def main():
seed_everything(2021)
train_iter, val_iter = get_data()
network, optimizer, scaler, scheduler, criterion = get_model()
train(network, criterion, optimizer, scaler, train_iter, val_iter, scheduler, "cuda:0")
if __name__ == "__main__":
main()
|
TEAM-IKYO/Open-Domain-Question-Answering
|
code/question_labeling/train.py
|
train.py
|
py
| 5,127 |
python
|
en
|
code
| 24 |
github-code
|
6
|
32925752157
|
import scrapy
import os
import wget
class BlogSpider(scrapy.Spider):
name = 'blogspider'
start_urls = ['https://www.va.gov/vdl/application.asp?appid=6']
def parse(self, response):
try:
link='https://www.va.gov/vdl/'
for title in response.xpath('//tr'):
sect=response.xpath('//*[@id="tier4innerContent"]/p').css('::text').get().replace("Section","")
pack=response.xpath('//*[@id="tier4innerContent"]/h2[2]').css('::text').get()
cnt=0
doc="<td></td>"
pdf="<td></td>"
for title1 in title.xpath('td'):
#print(title.xpath('td').css('::text').get())
if cnt==0:
titl=title1.css('::text').get()
if cnt==3:
for title2 in title1.css('::text'):
if title2.get()=="PDF":
pdf='<td><a href="' + link + title1.xpath('a').xpath('@href').extract()[0] + '">Link</a></td>'
elif title2.get()=="DOCX":
doc='<td><a href="' + link + title1.xpath('a').xpath('@href').extract()[1] + '">Link</a></td>'
print('<tr><td>' + sect + '</td><td>' + pack + '</td><td>' + titl + '</td>' + doc + pdf + '</tr>\n')
cnt=cnt+1
except:
print("")
try:
for next_page in response.xpath('//td/a'):
yield response.follow(next_page, self.parse)
except:
print("")
|
RamSailopal/VA-Markup
|
scrape3.py
|
scrape3.py
|
py
| 1,537 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27391464453
|
import logging
import os
from urllib.parse import urljoin, urlunparse
from rdflib import Graph, Literal, Namespace
from rdflib.namespace import OWL, RDF, RDFS, XSD
from crunch_uml import const, db, util
from crunch_uml.excpetions import CrunchException
from crunch_uml.renderers.renderer import ModelRenderer, RendererRegistry
logger = logging.getLogger()
class LodRenderer(ModelRenderer):
'''
Renders all model packages using jinja2 and a template.
A model package is a package with at least 1 class inside
'''
def writeToFile(self, graph, args):
pass
def render(self, args, database: db.Database):
if args.linked_data_namespace is None:
logger.warning(
f'No namespace provided via parameter "linked_data_namespace", using default {const.DEFAULT_LOD_NS}'
)
args.linked_data_namespace = const.DEFAULT_LOD_NS
elif not isinstance(args.linked_data_namespace, str):
args.linked_data_namespace = urlunparse(args.linked_data_namespace)
# sourcery skip: raise-specific-error
MYNS = Namespace(args.linked_data_namespace) # noqa: F841
schema = Namespace("http://schema.org/") # noqa: F841
# Create graph
g = Graph()
# Get list of packages that are to be rendered
models = self.getModels(args, database)
if len(models) == 0:
msg = "Cannot render output: packages does not exist"
logger.error(msg)
raise CrunchException(msg)
class_dict = {} # used to find all classes by guid
# First add all classes
for model in models:
modelname = util.remove_substring(model.name, 'model')
ns = Namespace(urljoin(str(args.linked_data_namespace), f"/{modelname}/"))
for cls in model.classes:
# Update the dict first
class_dict[cls.id] = ns[cls.id]
# Add the class
g.add((ns[cls.id], RDF.type, OWL.Class))
g.add((ns[cls.id], RDFS.label, Literal(cls.name)))
if cls.definitie is not None:
g.add((ns[cls.id], RDFS.comment, Literal(cls.definitie)))
for attribute in cls.attributes:
# Add the attributes
if attribute.name is not None and attribute.primitive is not None:
g.add((ns[attribute.id], RDF.type, OWL.DatatypeProperty))
g.add((ns[attribute.id], RDFS.domain, ns[cls.id]))
g.add((ns[attribute.id], RDFS.label, Literal(attribute.name)))
g.add((ns[attribute.id], RDFS.range, XSD.string))
if attribute.definitie is not None:
g.add((ns[attribute.id], RDFS.comment, Literal(attribute.definitie)))
# Then add all relations
for model in models:
for cls in model.classes:
# First set inheritance
for subclass in cls.subclasses:
super_cls = class_dict.get(cls.id)
if subclass.superclass is not None:
sub_cls = class_dict.get(subclass.superclass.id)
if super_cls is not None and sub_cls is not None:
g.add((sub_cls, RDFS.subClassOf, super_cls))
# Then set associations
for assoc in cls.uitgaande_associaties:
from_cls = class_dict.get(cls.id)
to_cls = class_dict.get(assoc.dst_class.id)
if from_cls is not None and to_cls is not None:
# Add the properties
g.add((ns[assoc.id], RDF.type, OWL.ObjectProperty))
g.add((ns[assoc.id], RDFS.domain, from_cls))
g.add((ns[assoc.id], RDFS.range, to_cls))
g.add((ns[assoc.id], RDFS.label, Literal(assoc.name)))
if assoc.definitie is not None:
g.add((ns[assoc.id], RDFS.comment, Literal(assoc.definitie)))
self.writeToFile(g, args)
@RendererRegistry.register(
"ttl",
descr='Renderer that renders Linked Data ontology in turtle from the supplied models, '
+ 'where a model is a package that includes at least one Class. '
+ 'Needs parameter "output_lod_url".',
)
class TTLRenderer(LodRenderer):
'''
Renders all model packages using jinja2 and a template.
A model package is a package with at least 1 class inside
'''
def writeToFile(self, graph, args):
# get filename
base_name, ext = os.path.splitext(args.outputfile)
outputfile = f'{base_name}.ttl'
with open(outputfile, 'w') as file:
file.write(graph.serialize(format='turtle'))
@RendererRegistry.register(
"rdf",
descr='Renderer that renders Linked Data ontology in RDF from the supplied models, '
+ 'where a model is a package that includes at least one Class. '
+ ' Needs parameter "output_lod_url".',
)
class RDFRenderer(LodRenderer):
'''
Renders all model packages as a Linked Data ontology in RDF/XML format.
A model package is a package with at least 1 class inside
'''
def writeToFile(self, graph, args):
# get filename
base_name, ext = os.path.splitext(args.outputfile)
outputfile = f'{base_name}.rdf'
with open(outputfile, 'w') as file:
file.write(graph.serialize(format='xml'))
@RendererRegistry.register(
"json-ld",
descr='Renderer that renders Linked Data ontology in JSON-LD from the supplied models, '
+ 'where a model is a package that includes at least one Class. '
+ ' Needs parameter "output_lod_url".',
)
class JSONLDRenderer(LodRenderer):
'''
Renders all model packages as a Linked Data ontology in JSON-LD format.
A model package is a package with at least 1 class inside
'''
def writeToFile(self, graph, args):
# get filename
base_name, ext = os.path.splitext(args.outputfile)
outputfile = f'{base_name}.jsonld'
with open(outputfile, 'w') as file:
file.write(graph.serialize(format='json-ld'))
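# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Every renderer above boils down to building an rdflib Graph and serializing it
# in some format. The guarded block below shows only that rdflib portion with a
# made-up namespace and class id; the db/args plumbing used by LodRenderer.render
# is deliberately left out.
if __name__ == "__main__":
    demo_ns = Namespace("http://example.org/model/")  # hypothetical namespace
    demo_graph = Graph()
    demo_graph.add((demo_ns["class-1"], RDF.type, OWL.Class))
    demo_graph.add((demo_ns["class-1"], RDFS.label, Literal("ExampleClass")))
    print(demo_graph.serialize(format="turtle"))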
|
brienen/crunch_uml
|
crunch_uml/renderers/lodrenderer.py
|
lodrenderer.py
|
py
| 6,268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19400181919
|
from typing import List
import common.arrayCommon as Array
import heapq
class Solution:
def pondSizes(self, land: List[List[int]]) -> List[int]:
h = len(land)
w = len(land[0])
result = []
for i in range(h):
for j in range(w):
if land[i][j] == 0:
a = []
self.search(land, w, h, i, j, a)
heapq.heappush(result, len(a))
return heapq.nsmallest(len(result), result)
def search(self, land, w, h, i, j, ans):
if i < 0 or i >= h or j < 0 or j >= w:
return
if land[i][j] != 0:
return
land[i][j] = 1
ans.append(0)
self.search(land, w, h, i + 1, j + 1, ans)
self.search(land, w, h, i - 1, j - 1, ans)
self.search(land, w, h, i + 1, j - 1, ans)
self.search(land, w, h, i - 1, j + 1, ans)
self.search(land, w, h, i - 1, j, ans)
self.search(land, w, h, i + 1, j, ans)
self.search(land, w, h, i, j + 1, ans)
self.search(land, w, h, i, j - 1, ans)
land = [
[0, 2, 1, 0],
[0, 1, 0, 1],
[1, 1, 0, 1],
[0, 1, 0, 1]
]
Array.print2DArray(land)
r = Solution().pondSizes(land)
print(r)
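# --- Hedged alternative sketch (added for illustration; not part of the original file) ---
# The recursive search above can blow past Python's recursion limit on large
# ponds. The helper below computes the same sorted pond sizes with an explicit
# stack instead; like the original, it marks visited cells by mutating the grid.
def pond_sizes_iterative(grid: List[List[int]]) -> List[int]:
    h, w = len(grid), len(grid[0])
    sizes = []
    for si in range(h):
        for sj in range(w):
            if grid[si][sj] != 0:
                continue
            size = 0
            stack = [(si, sj)]
            grid[si][sj] = 1  # mark visited
            while stack:
                i, j = stack.pop()
                size += 1
                # visit all 8 neighbours, matching the recursive version
                for di in (-1, 0, 1):
                    for dj in (-1, 0, 1):
                        ni, nj = i + di, j + dj
                        if 0 <= ni < h and 0 <= nj < w and grid[ni][nj] == 0:
                            grid[ni][nj] = 1
                            stack.append((ni, nj))
            sizes.append(size)
    return sorted(sizes)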
|
Yigang0622/LeetCode
|
pondSizes.py
|
pondSizes.py
|
py
| 1,310 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28487843956
|
from ..database.userProgressPlans import \
userSkillProgressPlan, \
skillsMappedToSkillSet, \
collaboratorMappedToCollabSet, \
goalsMappedToGoalSets, \
goals, \
weeklySkillSetFeedBack, \
users, \
skills
from ..database.dbConnection import dbConnection
class usersDao(dbConnection):
def __init__(self):
super().__init__(users)
# Let's guarantee we'll only be getting one row. Think it makes sense
# to only be dealing with one at a time.
# self.rowLimit = 1
def deleteRow(self, row):
if isinstance(row, users):
super(usersDao, self).deleteRow(row)
else:
return False
def query(self,
userId = None,
userName = None,
userFullName = None,
rowLimit = 1
):
q = {}
if userId:
q['userId'] = userId
if userName:
q['userName'] = userName
if userFullName:
q['userFullName'] = userFullName
# self.results =
return super(usersDao, self).query(
queries=q,
limit=rowLimit
)
def insertRow(self, userName=None, userFullName=None):
row = super(usersDao, self)._createNewRowInstance()
row.userName = userName
row.userFullName = userFullName
super(usersDao, self).insertRow(row)
return row
class skillsDao(dbConnection):
def __init__(self):
super().__init__(skills)
# Let's guarantee we'll only be getting one row. Think it makes sense
# to only be dealing with one at a time.
# self.rowLimit = 1
def deleteRow(self, row):
if isinstance(row, skills):
super(skillsDao, self).deleteRow(row)
else:
return False
def query(self,
skillId=None,
skillName=None,
skillDescription=None,
rowLimit=1
):
q = {}
if skillId:
q['skillId'] = skillId
if skillName:
q['skillName'] = skillName
if skillDescription:
q['skillDescription'] = skillDescription
# self.results =
return super(skillsDao, self).query(
q,
rowLimit
)
def insertRow(self,
skillName=None,
skillDescription=None
):
row = super(skillsDao, self)._createNewRowInstance()
row.skillName = skillName
row.skillDescription = skillDescription
super(skillsDao, self).insertRow(row)
return row
class userSkillProgressPlanDao(object):
pass
class skillsMappedToSkillSetDao(dbConnection):
def __init__(self):
super().__init__(skillsMappedToSkillSet)
# Let's guarantee we'll only be getting one row. Think it makes sense
# to only be dealing with one at a time.
# self.rowLimit = 1
def deleteRow(self, row):
if isinstance(row, skillsMappedToSkillSet):
super(skillsMappedToSkillSetDao, self).deleteRow(row)
else:
return False
def query(self,
skillSetId=None,
skillSetName=None,
rowLimit=1
):
q = {}
if skillSetId:
q['skillSetId'] = skillSetId
if skillSetName:
q['skillSetName'] = skillSetName
return super(skillsMappedToSkillSetDao, self).query(
q,
rowLimit
)
def insertRow(self,
skillSetName=None,
skillIds=None,
skillSetId=None
):
row = super(skillsMappedToSkillSetDao, self)._createNewRowInstance()
row.skillSetName = skillSetName
row.skillIds = skillIds
if skillSetId:
row.skillSetId = skillSetId
## We're going to let skillSetId be set by the table automatically.
super(skillsMappedToSkillSetDao, self).insertRow(row)
return row
class collaboratorMappedToCollabSetDao(object):
pass
class goalsMappedToGoalSetsDao(object):
pass
class goalsDao(object):
pass
class weeklySkillSetFeedBackDao(object):
pass
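# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming dbConnection wires up a working session, a typical round trip with the
# DAOs above might look like the guarded block below; the user name and full name
# are made-up values.
if __name__ == "__main__":
    dao = usersDao()
    new_user = dao.insertRow(userName="example_user", userFullName="Example User")
    found = dao.query(userName="example_user", rowLimit=1)
    dao.deleteRow(new_user)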
|
mraison/work_profiling_app
|
work_profiling_app/modules/daos/daos.py
|
daos.py
|
py
| 4,213 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22257701787
|
"""Filters module with a class to manage filters/algorithms for polydata datasets."""
import collections.abc
import logging
import numpy as np
import pyvista
from pyvista import (
abstract_class, _vtk, NORMALS, generate_plane, assert_empty_kwargs, vtk_id_list_to_array, get_array
)
from pyvista.core.errors import NotAllTrianglesError
from pyvista.core.filters import _get_output, _update_alg
from pyvista.core.filters.data_set import DataSetFilters
@abstract_class
class PolyDataFilters(DataSetFilters):
"""An internal class to manage filters/algorithms for polydata datasets."""
def edge_mask(poly_data, angle):
"""Return a mask of the points of a surface mesh that has a surface angle greater than angle.
Parameters
----------
angle : float
Angle to consider an edge.
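Examples
--------
Flag the points that lie on sharp edges of a triangulated cube
(a sketch; the 10 degree threshold is an arbitrary choice).
>>> import pyvista as pv
>>> cube = pv.Cube().triangulate()
>>> mask = cube.edge_mask(10)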
"""
if not isinstance(poly_data, pyvista.PolyData): # pragma: no cover
poly_data = pyvista.PolyData(poly_data)
poly_data.point_arrays['point_ind'] = np.arange(poly_data.n_points)
featureEdges = _vtk.vtkFeatureEdges()
featureEdges.SetInputData(poly_data)
featureEdges.FeatureEdgesOn()
featureEdges.BoundaryEdgesOff()
featureEdges.NonManifoldEdgesOff()
featureEdges.ManifoldEdgesOff()
featureEdges.SetFeatureAngle(angle)
featureEdges.Update()
edges = _get_output(featureEdges)
orig_id = pyvista.point_array(edges, 'point_ind')
return np.in1d(poly_data.point_arrays['point_ind'], orig_id,
assume_unique=True)
def boolean_cut(poly_data, cut, tolerance=1E-5, inplace=False):
"""Perform a Boolean cut using another mesh.
Parameters
----------
cut : pyvista.PolyData
Mesh making the cut
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
The cut mesh.
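Examples
--------
Cut one sphere with another, keeping the surface of the shared
volume (a sketch; both inputs must be all-triangle meshes).
>>> import pyvista as pv
>>> s1 = pv.Sphere()
>>> s2 = pv.Sphere(center=(0.25, 0, 0))
>>> cut = s1.boolean_cut(s2)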
"""
if not isinstance(cut, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
if not poly_data.is_all_triangles() or not cut.is_all_triangles():
raise NotAllTrianglesError("Make sure both the input and output are triangulated.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToIntersection()
# bfilter.SetOperationToDifference()
bfilter.SetInputData(1, cut)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.SetTolerance(tolerance)
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def boolean_add(poly_data, mesh, inplace=False):
"""Add a mesh to the current mesh.
Does not attempt to "join" the meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to add.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
joinedmesh : pyvista.PolyData
The joined mesh.
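Examples
--------
Append two spheres into a single mesh without merging them (a sketch).
>>> import pyvista as pv
>>> combined = pv.Sphere().boolean_add(pv.Sphere(center=(1.0, 0, 0)))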
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
vtkappend = _vtk.vtkAppendPolyData()
vtkappend.AddInputData(poly_data)
vtkappend.AddInputData(mesh)
vtkappend.Update()
mesh = _get_output(vtkappend)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def __add__(poly_data, mesh):
"""Merge these two meshes."""
if not isinstance(mesh, _vtk.vtkPolyData):
return DataSetFilters.__add__(poly_data, mesh)
return PolyDataFilters.boolean_add(poly_data, mesh)
def boolean_union(poly_data, mesh, inplace=False):
"""Combine two meshes and attempts to create a manifold mesh.
Parameters
----------
mesh : pyvista.PolyData
The mesh to perform a union against.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
union : pyvista.PolyData
The union mesh.
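Examples
--------
Union two overlapping spheres (a sketch).
>>> import pyvista as pv
>>> s1 = pv.Sphere()
>>> s2 = pv.Sphere(center=(0.25, 0, 0))
>>> union = s1.boolean_union(s2)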
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToUnion()
bfilter.SetInputData(1, mesh)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def boolean_difference(poly_data, mesh, inplace=False):
"""Combine two meshes and retains only the volume in common between the meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to perform a union against.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
union : pyvista.PolyData
The union mesh.
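Examples
--------
Subtract a shifted sphere from a sphere (a sketch).
>>> import pyvista as pv
>>> s1 = pv.Sphere()
>>> s2 = pv.Sphere(center=(0.25, 0, 0))
>>> diff = s1.boolean_difference(s2)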
"""
if not isinstance(mesh, pyvista.PolyData):
raise TypeError("Input mesh must be PolyData.")
bfilter = _vtk.vtkBooleanOperationPolyDataFilter()
bfilter.SetOperationToDifference()
bfilter.SetInputData(1, mesh)
bfilter.SetInputData(0, poly_data)
bfilter.ReorientDifferenceCellsOff()
bfilter.Update()
mesh = _get_output(bfilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def intersection(poly_data, mesh, split_first=True, split_second=True):
"""Compute the intersection between two meshes.
Parameters
----------
mesh : pyvista.PolyData
The mesh to intersect with.
split_first : bool, optional
If `True`, return the first input mesh split by the intersection with the
second input mesh.
split_second : bool, optional
If `True`, return the second input mesh split by the intersection with the
first input mesh.
Returns
-------
intersection: pyvista.PolyData
The intersection line.
first_split: pyvista.PolyData
The first mesh split along the intersection. Returns the original first mesh
if `split_first` is False.
second_split: pyvista.PolyData
The second mesh split along the intersection. Returns the original second mesh
if `split_second` is False.
Examples
--------
Intersect two spheres, returning the intersection and both spheres
which have new points/cells along the intersection line.
>>> import pyvista as pv
>>> s1 = pv.Sphere()
>>> s2 = pv.Sphere(center=(0.25, 0, 0))
>>> intersection, s1_split, s2_split = s1.intersection(s2)
The mesh splitting takes additional time and can be turned
off for either mesh individually.
>>> intersection, _, s2_split = s1.intersection(s2, \
split_first=False, \
split_second=True)
"""
intfilter = _vtk.vtkIntersectionPolyDataFilter()
intfilter.SetInputDataObject(0, poly_data)
intfilter.SetInputDataObject(1, mesh)
intfilter.SetComputeIntersectionPointArray(True)
intfilter.SetSplitFirstOutput(split_first)
intfilter.SetSplitSecondOutput(split_second)
intfilter.Update()
intersection = _get_output(intfilter, oport=0)
first = _get_output(intfilter, oport=1)
second = _get_output(intfilter, oport=2)
return intersection, first, second
def curvature(poly_data, curv_type='mean'):
"""Return the pointwise curvature of a mesh.
Parameters
----------
mesh : vtk.polydata
vtk polydata mesh
curv_type : str, optional
One of the following strings:
'mean'
'gaussian'
'maximum'
'minimum'
Returns
-------
curvature : np.ndarray
Curvature values
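Examples
--------
Compute the Gaussian curvature of a sphere (a sketch; one value is
returned per point).
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> curv = sphere.curvature('gaussian')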
"""
curv_type = curv_type.lower()
# Create curve filter and compute curvature
curvefilter = _vtk.vtkCurvatures()
curvefilter.SetInputData(poly_data)
if curv_type == 'mean':
curvefilter.SetCurvatureTypeToMean()
elif curv_type == 'gaussian':
curvefilter.SetCurvatureTypeToGaussian()
elif curv_type == 'maximum':
curvefilter.SetCurvatureTypeToMaximum()
elif curv_type == 'minimum':
curvefilter.SetCurvatureTypeToMinimum()
else:
raise ValueError('Curv_Type must be either "Mean", '
'"Gaussian", "Maximum", or "Minimum"')
curvefilter.Update()
# Compute and return curvature
curv = _get_output(curvefilter)
return _vtk.vtk_to_numpy(curv.GetPointData().GetScalars())
def plot_curvature(poly_data, curv_type='mean', **kwargs):
"""Plot the curvature.
Parameters
----------
curvtype : str, optional
One of the following strings indicating curvature type:
* ``'Mean'``
* ``'Gaussian'``
* ``'Maximum'``
* ``'Minimum'``
**kwargs : optional
See :func:`pyvista.plot`
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Examples
--------
Plot the mean curvature of an example mesh.
>>> from pyvista import examples
>>> hills = examples.load_random_hills()
>>> cpos = hills.plot_curvature(smooth_shading=True)
"""
kwargs.setdefault('scalar_bar_args',
{'title': f'{curv_type.capitalize()} Curvature'})
return poly_data.plot(scalars=poly_data.curvature(curv_type),
**kwargs)
def triangulate(poly_data, inplace=False):
"""Return an all triangle mesh.
More complex polygons will be broken down into triangles.
Parameters
----------
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Mesh containing only triangles.
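Examples
--------
Triangulate the quad faces of a cube (a sketch).
>>> import pyvista as pv
>>> tri_cube = pv.Cube().triangulate()
>>> tri_cube.is_all_triangles()
True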
"""
trifilter = _vtk.vtkTriangleFilter()
trifilter.SetInputData(poly_data)
trifilter.PassVertsOff()
trifilter.PassLinesOff()
trifilter.Update()
mesh = _get_output(trifilter)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def smooth(poly_data, n_iter=20, relaxation_factor=0.01, convergence=0.0,
edge_angle=15, feature_angle=45,
boundary_smoothing=True, feature_smoothing=False, inplace=False):
"""Adjust point coordinates using Laplacian smoothing.
The effect is to "relax" the mesh, making the cells better shaped and
the vertices more evenly distributed.
Parameters
----------
n_iter : int
Number of iterations for Laplacian smoothing.
relaxation_factor : float, optional
Relaxation factor controls the amount of displacement in a single
iteration. Generally a lower relaxation factor and higher number of
iterations is numerically more stable.
convergence : float, optional
Convergence criterion for the iteration process. Smaller numbers
result in more smoothing iterations. Range from (0 to 1).
edge_angle : float, optional
Edge angle to control smoothing along edges (either interior or boundary).
feature_angle : float, optional
Feature angle for sharp edge identification.
boundary_smoothing : bool, optional
Boolean flag to control smoothing of boundary edges.
feature_smoothing : bool, optional
Boolean flag to control smoothing of feature edges.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Smoothed mesh.
Examples
--------
Smooth the edges of an all triangular cube
>>> import pyvista as pv
>>> cube = pv.Cube().triangulate().subdivide(5).clean()
>>> smooth_cube = cube.smooth(1000, feature_smoothing=False)
>>> n_edge_cells = cube.extract_feature_edges().n_cells
>>> n_smooth_cells = smooth_cube.extract_feature_edges().n_cells
>>> print(f'Sharp Edges on Cube: {n_edge_cells}')
Sharp Edges on Cube: 384
>>> print(f'Sharp Edges on Smooth Cube: {n_smooth_cells}')
Sharp Edges on Smooth Cube: 12
"""
alg = _vtk.vtkSmoothPolyDataFilter()
alg.SetInputData(poly_data)
alg.SetNumberOfIterations(n_iter)
alg.SetConvergence(convergence)
alg.SetFeatureEdgeSmoothing(feature_smoothing)
alg.SetFeatureAngle(feature_angle)
alg.SetEdgeAngle(edge_angle)
alg.SetBoundarySmoothing(boundary_smoothing)
alg.SetRelaxationFactor(relaxation_factor)
alg.Update()
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def decimate_pro(poly_data, reduction, feature_angle=45.0, split_angle=75.0, splitting=True,
pre_split_mesh=False, preserve_topology=False, inplace=False):
"""Reduce the number of triangles in a triangular mesh.
It forms a good approximation to the original geometry. Based on the algorithm
originally described in "Decimation of Triangle Meshes", Proc Siggraph 92.
Parameters
----------
reduction : float
Reduction factor. A value of 0.9 will leave 10 % of the original number
of vertices.
feature_angle : float, optional
Angle used to define what an edge is (i.e., if the surface normal between
two adjacent triangles is >= feature_angle, an edge exists).
split_angle : float, optional
Angle used to control the splitting of the mesh. A split line exists
when the surface normals between two edge connected triangles are >= split_angle.
splitting : bool, optional
Controls the splitting of the mesh at corners, along edges, at non-manifold
points, or anywhere else a split is required. Turning splitting off
will better preserve the original topology of the mesh, but may not
necessarily give the exact requested decimation.
pre_split_mesh : bool, optional
Separates the mesh into semi-planar patches, which are disconnected
from each other. This can give superior results in some cases. If pre_split_mesh
is set to True, the mesh is split with the specified split_angle. Otherwise
mesh splitting is deferred as long as possible.
preserve_topology : bool, optional
Controls topology preservation. If on, mesh splitting and hole elimination
will not occur. This may limit the maximum reduction that may be achieved.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Decimated mesh.
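Examples
--------
Reduce a sphere to roughly a quarter of its triangles (a sketch; the
exact count depends on the input resolution).
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> decimated = sphere.decimate_pro(0.75)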
"""
alg = _vtk.vtkDecimatePro()
alg.SetInputData(poly_data)
alg.SetTargetReduction(reduction)
alg.SetPreserveTopology(preserve_topology)
alg.SetFeatureAngle(feature_angle)
alg.SetSplitting(splitting)
alg.SetSplitAngle(split_angle)
alg.SetPreSplitMesh(pre_split_mesh)
alg.Update()
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def tube(poly_data, radius=None, scalars=None, capping=True, n_sides=20,
radius_factor=10, preference='point', inplace=False):
"""Generate a tube around each input line.
The radius of the tube can be set to linearly vary with a scalar value.
Parameters
----------
radius : float
Minimum tube radius (minimum because the tube radius may vary).
scalars : str, optional
scalars array by which the radius varies
capping : bool, optional
Turn on/off whether to cap the ends with polygons. Default ``True``.
n_sides : int, optional
Set the number of sides for the tube. Minimum of 3.
radius_factor : float, optional
Maximum tube radius in terms of a multiple of the minimum radius.
preference : str, optional
The field preference when searching for the scalars array by name.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
mesh : pyvista.PolyData
Tube-filtered mesh.
Examples
--------
Convert a single line to a tube
>>> import pyvista as pv
>>> line = pv.Line()
>>> tube = line.tube(radius=0.02)
>>> print('Line Cells:', line.n_cells)
Line Cells: 1
>>> print('Tube Cells:', tube.n_cells)
Tube Cells: 22
"""
if not isinstance(poly_data, pyvista.PolyData):
poly_data = pyvista.PolyData(poly_data)
if n_sides < 3:
n_sides = 3
tube = _vtk.vtkTubeFilter()
tube.SetInputDataObject(poly_data)
# User Defined Parameters
tube.SetCapping(capping)
if radius is not None:
tube.SetRadius(radius)
tube.SetNumberOfSides(n_sides)
tube.SetRadiusFactor(radius_factor)
# Check if scalars array given
if scalars is not None:
if not isinstance(scalars, str):
raise TypeError('scalars array must be given as a string name')
_, field = poly_data.get_array(scalars, preference=preference, info=True)
# args: (idx, port, connection, field, name)
tube.SetInputArrayToProcess(0, 0, 0, field.value, scalars)
tube.SetVaryRadiusToVaryRadiusByScalar()
# Apply the filter
tube.Update()
mesh = _get_output(tube)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def subdivide(poly_data, nsub, subfilter='linear', inplace=False):
"""Increase the number of triangles in a single, connected triangular mesh.
Uses one of the following vtk subdivision filters to subdivide a mesh.
vtkButterflySubdivisionFilter
vtkLoopSubdivisionFilter
vtkLinearSubdivisionFilter
Linear subdivision results in the fastest mesh subdivision,
but it does not smooth mesh edges, but rather splits each
triangle into 4 smaller triangles.
Butterfly and loop subdivision perform smoothing when
dividing, and may introduce artifacts into the mesh when
dividing.
Subdivision filter appears to fail for multiple part meshes.
Should be one single mesh.
Parameters
----------
nsub : int
Number of subdivisions. Each subdivision creates 4 new
triangles, so the number of resulting triangles is
``nface*4**nsub`` where ``nface`` is the current number of
faces.
subfilter : string, optional
Can be one of the following: 'butterfly', 'loop', 'linear'.
inplace : bool, optional
Updates mesh in-place. Default ``False``.
Returns
-------
mesh : Polydata object
``pyvista`` polydata object.
Examples
--------
>>> from pyvista import examples
>>> import pyvista
>>> mesh = pyvista.PolyData(examples.planefile)
>>> submesh = mesh.subdivide(1, 'loop')
Alternatively, update the mesh in-place.
>>> submesh = mesh.subdivide(1, 'loop', inplace=True)
"""
subfilter = subfilter.lower()
if subfilter == 'linear':
sfilter = _vtk.vtkLinearSubdivisionFilter()
elif subfilter == 'butterfly':
sfilter = _vtk.vtkButterflySubdivisionFilter()
elif subfilter == 'loop':
sfilter = _vtk.vtkLoopSubdivisionFilter()
else:
raise ValueError("Subdivision filter must be one of the following: "
"'butterfly', 'loop', or 'linear'")
# Subdivide
sfilter.SetNumberOfSubdivisions(nsub)
sfilter.SetInputData(poly_data)
sfilter.Update()
submesh = _get_output(sfilter)
if inplace:
poly_data.overwrite(submesh)
return poly_data
else:
return submesh
def subdivide_adaptive(poly_data, max_edge_len=None, max_tri_area=None,
max_n_tris=None, max_n_passes=None, inplace=False):
"""Increase the number of triangles in a triangular mesh based on edge and/or area metrics.
This filter uses a simple case-based, multi-pass approach to
repeatedly subdivide the input triangle mesh to meet the area
and/or edge length criteria. New points may be inserted only
on edges; depending on the number of edges to be subdivided a
different number of triangles are inserted ranging from two
(i.e., two triangles replace the original one) to four.
Point and cell data is treated as follows: The cell data from
a parent triangle is assigned to its subdivided
children. Point data is interpolated along edges as the edges
are subdivided.
This filter retains mesh watertightness provided the mesh was
originally watertight and the area and max-triangles criteria
are not used.
Parameters
----------
max_edge_len : float, optional
The maximum edge length that a triangle may have. Edges
longer than this value are split in half and the
associated triangles are modified accordingly.
max_tri_area : float, optional
The maximum area that a triangle may have. Triangles
larger than this value are subdivided to meet this
threshold. Note that if this criterion is used it may
produce non-watertight meshes as a result.
max_n_tris : int, optional
The maximum number of triangles that can be created. If
the limit is hit, it may result in premature termination
of the algorithm and the results may be less than
satisfactory (for example non-watertight meshes may be
created). By default, the limit is set to a very large
number (i.e., no effective limit).
max_n_passes : int, optional
The maximum number of passes (i.e., levels of
subdivision). If the limit is hit, then the subdivision
process stops and additional passes (needed to meet other
criteria) are aborted. The default limit is set to a very
large number (i.e., no effective limit).
inplace : bool, optional
Updates mesh in-place.
Returns
-------
:class:`pyvista.PolyData`
Subdivided mesh
Examples
--------
>>> from pyvista import examples
>>> import pyvista
>>> mesh = pyvista.PolyData(examples.planefile)
>>> submesh = mesh.subdivide_adaptive(max_n_passes=2)
Alternatively, update the mesh in-place.
>>> submesh = mesh.subdivide_adaptive(max_n_passes=2, inplace=True)
"""
sfilter = _vtk.vtkAdaptiveSubdivisionFilter()
if max_edge_len:
sfilter.SetMaximumEdgeLength(max_edge_len)
if max_tri_area:
sfilter.SetMaximumTriangleArea(max_tri_area)
if max_n_tris:
sfilter.SetMaximumNumberOfTriangles(max_n_tris)
if max_n_passes:
sfilter.SetMaximumNumberOfPasses(max_n_passes)
sfilter.SetInputData(poly_data)
sfilter.Update()
submesh = _get_output(sfilter)
if inplace:
poly_data.overwrite(submesh)
return poly_data
else:
return submesh
def decimate(poly_data, target_reduction, volume_preservation=False,
attribute_error=False, scalars=True, vectors=True,
normals=False, tcoords=True, tensors=True, scalars_weight=0.1,
vectors_weight=0.1, normals_weight=0.1, tcoords_weight=0.1,
tensors_weight=0.1, inplace=False, progress_bar=False):
"""Reduce the number of triangles in a triangular mesh using vtkQuadricDecimation.
Parameters
----------
mesh : vtk.PolyData
Mesh to decimate
target_reduction : float
Fraction of the original mesh to remove.
If ``target_reduction`` is set to 0.9, this filter will try to reduce
the data set to 10% of its original size and will remove 90%
of the input triangles.
volume_preservation : bool, optional
Decide whether to activate volume preservation which greatly reduces
errors in triangle normal direction. If off, volume preservation is
disabled and if AttributeErrorMetric is active, these errors can be
large. Defaults to False.
attribute_error : bool, optional
Decide whether to include data attributes in the error metric. If
off, then only geometric error is used to control the decimation.
Defaults to False.
scalars : bool, optional
If attribute errors are to be included in the metric (i.e.,
AttributeErrorMetric is on), then the following flags control which
attributes are to be included in the error calculation. Defaults to
True.
vectors : bool, optional
See scalars parameter. Defaults to True.
normals : bool, optional
See scalars parameter. Defaults to False.
tcoords : bool, optional
See scalars parameter. Defaults to True.
tensors : bool, optional
See scalars parameter. Defaults to True.
scalars_weight : float, optional
The scaling weight contribution of the scalar attribute. These
values are used to weight the contribution of the attributes towards
the error metric. Defaults to 0.1.
vectors_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
normals_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
tcoords_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
tensors_weight : float, optional
See scalars weight parameter. Defaults to 0.1.
inplace : bool, optional
Updates mesh in-place.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
outmesh : pyvista.PolyData
Decimated mesh.
Examples
--------
Decimate a sphere while preserving its volume
>>> import pyvista as pv
>>> sphere = pv.Sphere(theta_resolution=90, phi_resolution=90)
>>> print(sphere.n_cells)
15840
>>> dec_sphere = sphere.decimate(0.9, volume_preservation=True)
>>> print(dec_sphere.n_cells)
1584
Notes
-----
If you encounter a segmentation fault or other error, consider
using ``clean`` to remove any invalid cells before using this
filter.
"""
# create decimation filter
alg = _vtk.vtkQuadricDecimation() # vtkDecimatePro as well
alg.SetVolumePreservation(volume_preservation)
alg.SetAttributeErrorMetric(attribute_error)
alg.SetScalarsAttribute(scalars)
alg.SetVectorsAttribute(vectors)
alg.SetNormalsAttribute(normals)
alg.SetTCoordsAttribute(tcoords)
alg.SetTensorsAttribute(tensors)
alg.SetScalarsWeight(scalars_weight)
alg.SetVectorsWeight(vectors_weight)
alg.SetNormalsWeight(normals_weight)
alg.SetTCoordsWeight(tcoords_weight)
alg.SetTensorsWeight(tensors_weight)
alg.SetTargetReduction(target_reduction)
alg.SetInputData(poly_data)
_update_alg(alg, progress_bar, 'Decimating')
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def compute_normals(poly_data, cell_normals=True,
point_normals=True, split_vertices=False,
flip_normals=False, consistent_normals=True,
auto_orient_normals=False,
non_manifold_traversal=True,
feature_angle=30.0, inplace=False):
"""Compute point and/or cell normals for a mesh.
The filter can reorder polygons to insure consistent
orientation across polygon neighbors. Sharp edges can be split
and points duplicated with separate normals to give crisp
(rendered) surface definition. It is also possible to globally
flip the normal orientation.
The algorithm works by determining normals for each polygon
and then averaging them at shared points. When sharp edges are
present, the edges are split and new points generated to
prevent blurry edges (due to Gouraud shading).
Parameters
----------
cell_normals : bool, optional
Calculation of cell normals. Defaults to ``True``.
point_normals : bool, optional
Calculation of point normals. Defaults to ``True``.
split_vertices : bool, optional
Splitting of sharp edges. Defaults to ``False``.
flip_normals : bool, optional
Set global flipping of normal orientation. Flipping
modifies both the normal direction and the order of a
cell's points. Defaults to ``False``.
consistent_normals : bool, optional
Enforcement of consistent polygon ordering. Defaults to ``True``.
auto_orient_normals : bool, optional
Turn on/off the automatic determination of correct normal
orientation. NOTE: This assumes a completely closed
surface (i.e. no boundary edges) and no non-manifold
edges. If these constraints do not hold, all bets are
off. This option adds some computational complexity, and
is useful if you do not want to have to inspect the
rendered image to determine whether to turn on the
``flip_normals`` flag. However, this flag can work with
the ``flip_normals`` flag, and if both are set, all the
normals in the output will point "inward". Defaults to
``False``.
non_manifold_traversal : bool, optional
Turn on/off traversal across non-manifold edges. Changing
this may prevent problems where the consistency of
polygonal ordering is corrupted due to topological
loops. Defaults to ``True``.
feature_angle : float, optional
The angle that defines a sharp edge. If the difference in
angle across neighboring polygons is greater than this
value, the shared edge is considered "sharp". Defaults to
30.0.
inplace : bool, optional
Updates mesh in-place. Defaults to ``False``.
Returns
-------
mesh : pyvista.PolyData
Updated mesh with cell and point normals.
Examples
--------
Compute the point normals of the surface of a sphere.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> sphere = sphere.compute_normals(cell_normals=False)
>>> normals = sphere['Normals']
>>> normals.shape
(842, 3)
Alternatively, create a new mesh when computing the normals
and compute both cell and point normals.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> sphere_with_norm = sphere.compute_normals()
>>> sphere_with_norm.point_arrays['Normals'].shape
(842, 3)
>>> sphere_with_norm.cell_arrays['Normals'].shape
(1680, 3)
Notes
-----
Previous arrays named "Normals" will be overwritten.
Normals are computed only for polygons and triangle
strips. Normals are not computed for lines or vertices.
Triangle strips are broken up into triangle polygons. You may
want to restrip the triangles.
May be easier to run ``mesh.point_normals`` or ``mesh.cell_normals``.
"""
normal = _vtk.vtkPolyDataNormals()
normal.SetComputeCellNormals(cell_normals)
normal.SetComputePointNormals(point_normals)
normal.SetSplitting(split_vertices)
normal.SetFlipNormals(flip_normals)
normal.SetConsistency(consistent_normals)
normal.SetAutoOrientNormals(auto_orient_normals)
normal.SetNonManifoldTraversal(non_manifold_traversal)
normal.SetFeatureAngle(feature_angle)
normal.SetInputData(poly_data)
normal.Update()
mesh = _get_output(normal)
if point_normals:
mesh.GetPointData().SetActiveNormals('Normals')
if cell_normals:
mesh.GetCellData().SetActiveNormals('Normals')
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def clip_closed_surface(poly_data, normal='x', origin=None,
tolerance=1e-06, inplace=False):
"""Clip a closed polydata surface with a plane.
This currently only supports one plane but could be
implemented to handle a plane collection.
It will produce a new closed surface by creating new polygonal
faces where the input data was clipped.
Non-manifold surfaces should not be used as input for this
filter. The input surface should have no open edges, and must
not have any edges that are shared by more than two faces. In
addition, the input surface should not self-intersect, meaning
that the faces of the surface should only touch at their
edges.
Parameters
----------
normal : str, list, optional
Plane normal to clip with. Plane is centered at
``origin``. Normal can be either a 3 member list
(e.g. ``[0, 0, 1]``) or one of the following strings:
``'x'``, ``'y'``, ``'z'``, ``'-x'``, ``'-y'``, or
``'-z'``.
origin : list, optional
Coordinate of the origin (e.g. ``[1, 0, 0]``). Defaults
to the center of the mesh.
tolerance : float, optional
The tolerance for creating new points while clipping. If
the tolerance is too small, then degenerate triangles
might be produced.
inplace : bool, optional
Updates mesh in-place. Defaults to ``False``.
Returns
-------
clipped_mesh : pyvista.PolyData
The clipped mesh.
Examples
--------
Clip a sphere in the X direction centered at the origin. This
will leave behind half a sphere in the positive X direction.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> clipped_mesh = sphere.clip_closed_surface()
Clip the sphere at the xy plane and leave behind half the
sphere in the positive Z direction. Shift the clip upwards to
leave a smaller mesh behind.
>>> clipped_mesh = sphere.clip_closed_surface('z', origin=[0, 0, 0.3])
"""
# verify it is manifold
if poly_data.n_open_edges > 0:
raise ValueError("This surface appears to be non-manifold.")
if isinstance(normal, str):
normal = NORMALS[normal.lower()]
# find center of data if origin not specified
if origin is None:
origin = poly_data.center
# create the plane for clipping
plane = generate_plane(normal, origin)
collection = _vtk.vtkPlaneCollection()
collection.AddItem(plane)
alg = _vtk.vtkClipClosedSurface()
alg.SetGenerateFaces(True)
alg.SetInputDataObject(poly_data)
alg.SetTolerance(tolerance)
alg.SetClippingPlanes(collection)
alg.Update() # Perform the Cut
result = _get_output(alg)
if inplace:
poly_data.overwrite(result)
return poly_data
else:
return result
def fill_holes(poly_data, hole_size, inplace=False, progress_bar=False): # pragma: no cover
"""
Fill holes in a pyvista.PolyData or vtk.vtkPolyData object.
Holes are identified by locating boundary edges, linking them together
into loops, and then triangulating the resulting loops. Note that you
can specify an approximate limit to the size of the hole that can be
filled.
Parameters
----------
hole_size : float
Specifies the maximum hole size to fill. This is represented as a
radius to the bounding circumsphere containing the hole. Note that
this is an approximate area; the actual area cannot be computed
without first triangulating the hole.
inplace : bool, optional
Return new mesh or overwrite input.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
mesh : pyvista.PolyData
Mesh with holes filled.
Examples
--------
Create a partial sphere with a hole and then fill it
>>> import pyvista as pv
>>> sphere_with_hole = pv.Sphere(end_theta=330)
>>> sphere = sphere_with_hole.fill_holes(1000)
>>> edges = sphere.extract_feature_edges(feature_edges=False,
... manifold_edges=False)
>>> assert edges.n_cells == 0
"""
logging.warning('pyvista.PolyData.fill_holes is known to segfault. '
'Use at your own risk')
alg = _vtk.vtkFillHolesFilter()
alg.SetHoleSize(hole_size)
alg.SetInputData(poly_data)
_update_alg(alg, progress_bar, 'Filling Holes')
mesh = _get_output(alg)
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def clean(poly_data, point_merging=True, tolerance=None, lines_to_points=True,
polys_to_lines=True, strips_to_polys=True, inplace=False,
absolute=True, progress_bar=False, **kwargs):
"""Clean the mesh.
This merges duplicate points, removes unused points, and/or
removes degenerate cells.
Parameters
----------
point_merging : bool, optional
Enables point merging. On by default.
tolerance : float, optional
Set merging tolerance. When enabled merging is set to
absolute distance. If ``absolute`` is ``False``, then the
merging tolerance is a fraction of the bounding box
length. The alias ``merge_tol`` is also accepted.
lines_to_points : bool, optional
Turn on/off conversion of degenerate lines to points.
Enabled by default.
polys_to_lines : bool, optional
Turn on/off conversion of degenerate polys to lines.
Enabled by default.
strips_to_polys : bool, optional
Turn on/off conversion of degenerate strips to polys.
inplace : bool, optional
Updates mesh in-place. Default ``False``.
absolute : bool, optional
Control if ``tolerance`` is an absolute distance or a
fraction.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
mesh : pyvista.PolyData
Cleaned mesh.
Examples
--------
Create a mesh with a degenerate face and then clean it,
removing the degenerate face
>>> import pyvista as pv
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> faces = np.array([3, 0, 1, 2, 3, 0, 3, 3])
>>> mesh = pv.PolyData(points, faces)
>>> mout = mesh.clean()
>>> print(mout.faces) # doctest:+SKIP
[3 0 1 2]
"""
if tolerance is None:
tolerance = kwargs.pop('merge_tol', None)
assert_empty_kwargs(**kwargs)
alg = _vtk.vtkCleanPolyData()
alg.SetPointMerging(point_merging)
alg.SetConvertLinesToPoints(lines_to_points)
alg.SetConvertPolysToLines(polys_to_lines)
alg.SetConvertStripsToPolys(strips_to_polys)
if isinstance(tolerance, (int, float)):
if absolute:
alg.ToleranceIsAbsoluteOn()
alg.SetAbsoluteTolerance(tolerance)
else:
alg.SetTolerance(tolerance)
alg.SetInputData(poly_data)
_update_alg(alg, progress_bar, 'Cleaning')
output = _get_output(alg)
# Check output so no segfaults occur
if output.n_points < 1:
raise ValueError('Clean tolerance is too high. Empty mesh returned.')
if inplace:
poly_data.overwrite(output)
return poly_data
else:
return output
def geodesic(poly_data, start_vertex, end_vertex, inplace=False,
keep_order=True):
"""Calculate the geodesic path between two vertices using Dijkstra's algorithm.
This will add an array titled ``'vtkOriginalPointIds'`` of the input
mesh's point ids to the output mesh. The default behavior of the
underlying ``vtkDijkstraGraphGeodesicPath`` filter is that the
geodesic path is reversed in the resulting mesh. This is overridden
in PyVista by default.
Parameters
----------
start_vertex : int
Vertex index indicating the start point of the geodesic segment.
end_vertex : int
Vertex index indicating the end point of the geodesic segment.
inplace : bool, optional
Whether the input mesh should be replaced with the path. The
geodesic path is always returned.
keep_order : bool, optional
If ``True``, the points of the returned path are guaranteed
to start with the start vertex (as opposed to the end vertex).
.. versionadded:: 0.32.0
Returns
-------
output : pyvista.PolyData
``PolyData`` object consisting of the line segment between the
two given vertices. If ``inplace`` is ``True`` this is the
same object as the input mesh.
Examples
--------
Plot the path between two points on a sphere.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> path = sphere.geodesic(0, 100)
>>> pl = pv.Plotter()
>>> actor = pl.add_mesh(sphere)
>>> actor = pl.add_mesh(path, line_width=5, color='k')
>>> cpos = pl.show()
"""
if not (0 <= start_vertex < poly_data.n_points and
0 <= end_vertex < poly_data.n_points):
raise IndexError('Invalid point indices.')
if not poly_data.is_all_triangles():
raise NotAllTrianglesError("Input mesh for geodesic path must be all triangles.")
dijkstra = _vtk.vtkDijkstraGraphGeodesicPath()
dijkstra.SetInputData(poly_data)
dijkstra.SetStartVertex(start_vertex)
dijkstra.SetEndVertex(end_vertex)
dijkstra.Update()
original_ids = vtk_id_list_to_array(dijkstra.GetIdList())
output = _get_output(dijkstra)
output["vtkOriginalPointIds"] = original_ids
# Do not copy textures from input
output.clear_textures()
# ensure proper order if requested
if keep_order and original_ids[0] == end_vertex:
output.points[...] = output.points[::-1, :]
output["vtkOriginalPointIds"] = output["vtkOriginalPointIds"][::-1]
if inplace:
poly_data.overwrite(output)
return poly_data
else:
return output
def geodesic_distance(poly_data, start_vertex, end_vertex):
"""Calculate the geodesic distance between two vertices using Dijkstra's algorithm.
Parameters
----------
start_vertex : int
Vertex index indicating the start point of the geodesic segment.
end_vertex : int
Vertex index indicating the end point of the geodesic segment.
Returns
-------
length : float
Length of the geodesic segment.
Examples
--------
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> length = sphere.geodesic_distance(0, 100)
>>> print(f'Length is {length:.3f}')
Length is 0.812
"""
path = poly_data.geodesic(start_vertex, end_vertex)
sizes = path.compute_cell_sizes(length=True, area=False, volume=False)
distance = np.sum(sizes['Length'])
del path
del sizes
return distance
def ray_trace(poly_data, origin, end_point, first_point=False, plot=False,
off_screen=False):
"""Perform a single ray trace calculation.
This requires a mesh and a line segment defined by an origin
and end_point.
Parameters
----------
origin : np.ndarray or list
Start of the line segment.
end_point : np.ndarray or list
End of the line segment.
first_point : bool, optional
Returns intersection of first point only.
plot : bool, optional
Plots ray trace results
off_screen : bool, optional
Plots off screen when ``plot=True``. Used for unit testing.
Returns
-------
intersection_points : np.ndarray
Location of the intersection points. Empty array if no
intersections.
intersection_cells : np.ndarray
Indices of the intersection cells. Empty array if no
intersections.
Examples
--------
Compute the intersection between a ray from the origin and
[1, 0, 0] and a sphere with radius 0.5 centered at the origin
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> point, cell = sphere.ray_trace([0, 0, 0], [1, 0, 0], first_point=True)
>>> print(f'Intersected at {point[0]:.3f} {point[1]:.3f} {point[2]:.3f}')
Intersected at 0.499 0.000 0.000
"""
points = _vtk.vtkPoints()
cell_ids = _vtk.vtkIdList()
poly_data.obbTree.IntersectWithLine(np.array(origin),
np.array(end_point),
points, cell_ids)
intersection_points = _vtk.vtk_to_numpy(points.GetData())
if first_point and intersection_points.shape[0] >= 1:
intersection_points = intersection_points[0]
intersection_cells = []
if intersection_points.any():
if first_point:
ncells = 1
else:
ncells = cell_ids.GetNumberOfIds()
for i in range(ncells):
intersection_cells.append(cell_ids.GetId(i))
intersection_cells = np.array(intersection_cells)
if plot:
plotter = pyvista.Plotter(off_screen=off_screen)
plotter.add_mesh(poly_data, label='Test Mesh')
segment = np.array([origin, end_point])
plotter.add_lines(segment, 'b', label='Ray Segment')
plotter.add_mesh(intersection_points, 'r', point_size=10,
label='Intersection Points')
plotter.add_legend()
plotter.add_axes()
plotter.show()
return intersection_points, intersection_cells
def multi_ray_trace(poly_data, origins, directions, first_point=False, retry=False):
"""Perform multiple ray trace calculations.
This requires a mesh with only triangular faces,
an array of origin points and an equal sized array of
direction vectors to trace along.
The embree library used for vectorisation of the ray traces is known to occasionally
return no intersections where the VTK implementation would return an intersection.
If the result appears to be missing some intersection points, set retry=True to run a second pass over rays
that returned no intersections, using the VTK ray_trace implementation.
Parameters
----------
origins : np.ndarray or list
Starting point for each trace.
directions : np.ndarray or list
Direction vector for each trace.
first_point : bool, optional
Returns intersection of first point only.
retry : bool, optional
Will retry rays that return no intersections using the ray_trace
Returns
-------
intersection_points : np.ndarray
Location of the intersection points. Empty array if no
intersections.
intersection_rays : np.ndarray
Indices of the ray for each intersection point. Empty array if no
intersections.
intersection_cells : np.ndarray
Indices of the intersection cells. Empty array if no
intersections.
Examples
--------
Compute the intersection between rays from the origin in
directions ``[1, 0, 0]``, ``[0, 1, 0]`` and ``[0, 0, 1]``, and
a sphere with radius 0.5 centered at the origin
>>> import pyvista as pv # doctest: +SKIP
... sphere = pv.Sphere()
... points, rays, cells = sphere.multi_ray_trace([[0, 0, 0]]*3, [[1, 0, 0], [0, 1, 0], [0, 0, 1]], first_point=True)
... string = ", ".join([f"({point[0]:.3f}, {point[1]:.3f}, {point[2]:.3f})" for point in points])
... print(f'Rays intersected at {string}')
Rays intersected at (0.499, 0.000, 0.000), (0.000, 0.497, 0.000), (0.000, 0.000, 0.500)
"""
if not poly_data.is_all_triangles():
raise NotAllTrianglesError
try:
import trimesh, rtree, pyembree
except (ModuleNotFoundError, ImportError):
raise ImportError(
"To use multi_ray_trace please install trimesh, rtree and pyembree with:\n"
"\tconda install trimesh rtree pyembree"
)
origins = np.asarray(origins)
directions = np.asarray(directions)
faces_as_array = poly_data.faces.reshape((poly_data.n_faces, 4))[:, 1:]
tmesh = trimesh.Trimesh(poly_data.points, faces_as_array)
locations, index_ray, index_tri = tmesh.ray.intersects_location(
origins, directions, multiple_hits=not first_point
)
if retry:
# gather intersecting rays in lists
loc_lst, ray_lst, tri_lst = [arr.tolist() for arr in [locations, index_ray, index_tri]]
# find indices that trimesh failed on
all_ray_indices = np.arange(len(origins))
retry_ray_indices = np.setdiff1d(all_ray_indices, index_ray, assume_unique=True)
# compute ray points for all failed rays at once
origins_retry = origins[retry_ray_indices, :] # shape (n_retry, 3)
directions_retry = directions[retry_ray_indices, :]
unit_directions = directions_retry / np.linalg.norm(directions_retry,
axis=1, keepdims=True)
second_points = origins_retry + unit_directions * poly_data.length # shape (n_retry, 3)
for id_r, origin, second_point in zip(retry_ray_indices, origins_retry, second_points):
locs, indices = poly_data.ray_trace(origin, second_point, first_point=first_point)
if locs.any():
if first_point:
locs = locs.reshape([1, 3])
ray_lst.extend([id_r] * indices.size)
tri_lst.extend(indices)
loc_lst.extend(locs)
# sort result arrays by ray index
index_ray = np.array(ray_lst)
sorting_inds = index_ray.argsort()
index_ray = index_ray[sorting_inds]
index_tri = np.array(tri_lst)[sorting_inds]
locations = np.array(loc_lst)[sorting_inds]
return locations, index_ray, index_tri
def plot_boundaries(poly_data, edge_color="red", **kwargs):
"""Plot boundaries of a mesh.
Parameters
----------
edge_color : str, optional
The color of the edges when they are added to the plotter.
kwargs : optional
All additional keyword arguments will be passed to
:func:`pyvista.BasePlotter.add_mesh`
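Examples
--------
Plot the open boundary edges of a partial sphere (a sketch).
>>> import pyvista as pv
>>> hemisphere = pv.Sphere(end_theta=180)
>>> cpos = hemisphere.plot_boundaries()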
"""
edges = DataSetFilters.extract_feature_edges(poly_data)
plotter = pyvista.Plotter(off_screen=kwargs.pop('off_screen', None),
notebook=kwargs.pop('notebook', None))
plotter.add_mesh(edges, color=edge_color, style='wireframe', label='Edges')
plotter.add_mesh(poly_data, label='Mesh', **kwargs)
plotter.add_legend()
return plotter.show()
def plot_normals(poly_data, show_mesh=True, mag=1.0, flip=False,
use_every=1, **kwargs):
"""Plot the point normals of a mesh.
Parameters
----------
show_mesh : bool, optional
Plot the mesh itself. Defaults to ``True``.
mag : float, optional
Size magnitude of the normal arrows. Defaults to 1.0.
flip : bool, optional
Flip the normal direction when ``True``. Default
``False``.
use_every : int, optional
Display every nth normal. By default every normal is
displayed. Display every 10th normal by setting this
parameter to 10.
Examples
--------
Plot the normals of a sphere.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> cpos = sphere.plot_normals(mag=0.1)
"""
plotter = pyvista.Plotter(off_screen=kwargs.pop('off_screen', None),
notebook=kwargs.pop('notebook', None))
if show_mesh:
plotter.add_mesh(poly_data, **kwargs)
normals = poly_data.point_normals
if flip:
normals *= -1
plotter.add_arrows(poly_data.points[::use_every],
normals[::use_every], mag=mag, show_scalar_bar=False)
return plotter.show()
def remove_points(poly_data, remove, mode='any', keep_scalars=True, inplace=False):
"""Rebuild a mesh by removing points.
Only valid for all-triangle meshes.
Parameters
----------
remove : np.ndarray
If remove is a bool array, points that are ``True`` will
be removed. Otherwise, it is treated as a list of
indices.
mode : str, optional
When ``'all'``, only faces containing all points flagged
for removal will be removed. Default ``'any'``.
keep_scalars : bool, optional
When ``True``, point and cell scalars will be passed on to
the new mesh.
inplace : bool, optional
Updates mesh in-place. Defaults to ``False``.
Returns
-------
mesh : pyvista.PolyData
Mesh without the points flagged for removal.
ridx : np.ndarray
Indices of new points relative to the original mesh.
Examples
--------
Remove the first 100 points from a sphere.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> reduced_sphere, ridx = sphere.remove_points(range(100))
"""
remove = np.asarray(remove)
# np.asarray will eat anything, so we have to weed out bogus inputs
if not issubclass(remove.dtype.type, (np.bool_, np.integer)):
raise TypeError('Remove must be either a mask or an integer array-like')
if remove.dtype == np.bool_:
if remove.size != poly_data.n_points:
raise ValueError('Mask different size than n_points')
remove_mask = remove
else:
remove_mask = np.zeros(poly_data.n_points, np.bool_)
remove_mask[remove] = True
if not poly_data.is_all_triangles():
raise NotAllTrianglesError
f = poly_data.faces.reshape(-1, 4)[:, 1:]
vmask = remove_mask.take(f)
if mode == 'all':
fmask = ~(vmask).all(1)
else:
fmask = ~(vmask).any(1)
# Regenerate face and point arrays
uni = np.unique(f.compress(fmask, 0), return_inverse=True)
new_points = poly_data.points.take(uni[0], 0)
nfaces = fmask.sum()
faces = np.empty((nfaces, 4), dtype=pyvista.ID_TYPE)
faces[:, 0] = 3
faces[:, 1:] = np.reshape(uni[1], (nfaces, 3))
newmesh = pyvista.PolyData(new_points, faces, deep=True)
ridx = uni[0]
# Add scalars back to mesh if requested
if keep_scalars:
for key in poly_data.point_arrays:
newmesh.point_arrays[key] = poly_data.point_arrays[key][ridx]
for key in poly_data.cell_arrays:
try:
newmesh.cell_arrays[key] = poly_data.cell_arrays[key][fmask]
except Exception:
logging.warning(f'Unable to pass cell key {key} onto reduced mesh')
# Return vtk surface and reverse indexing array
if inplace:
poly_data.overwrite(newmesh)
return poly_data, ridx
else:
return newmesh, ridx
def flip_normals(poly_data):
"""Flip normals of a triangular mesh by reversing the point ordering.
Examples
--------
Flip the normals of a sphere and plot the normals before and
after the flip.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> cpos = sphere.plot_normals(mag=0.1)
>>> sphere.flip_normals()
>>> cpos = sphere.plot_normals(mag=0.1)
"""
if not poly_data.is_all_triangles:
raise NotAllTrianglesError('Can only flip normals on an all triangle mesh')
f = poly_data.faces.reshape((-1, 4))
f[:, 1:] = f[:, 1:][:, ::-1]
poly_data.faces = f
def delaunay_2d(poly_data, tol=1e-05, alpha=0.0, offset=1.0, bound=False,
inplace=False, edge_source=None, progress_bar=False):
"""Apply a delaunay 2D filter along the best fitting plane.
Parameters
----------
tol : float, optional
Specify a tolerance to control discarding of closely
spaced points. This tolerance is specified as a fraction
of the diagonal length of the bounding box of the points.
Defaults to ``1e-05``.
alpha : float, optional
Specify alpha (or distance) value to control output of
this filter. For a non-zero alpha value, only edges or
triangles contained within a sphere centered at mesh
vertices will be output. Otherwise, only triangles will be
output. Defaults to ``0.0``.
offset : float, optional
Specify a multiplier to control the size of the initial,
bounding Delaunay triangulation. Defaults to ``1.0``.
bound : bool, optional
Boolean controls whether bounding triangulation points
and associated triangles are included in the
output. These are introduced as an initial triangulation
to begin the triangulation process. This feature is nice
for debugging output. Default ``False``.
inplace : bool, optional
If ``True``, overwrite this mesh with the triangulated
mesh. Default ``False``.
edge_source : pyvista.PolyData, optional
Specify the source object used to specify constrained
edges and loops. If set, and lines/polygons are defined, a
constrained triangulation is created. The lines/polygons
are assumed to reference points in the input point set
(i.e. point ids are identical in the input and
source).
progress_bar : bool, optional
Display a progress bar to indicate progress. Default
``False``.
Examples
--------
Extract the points of a sphere and then convert the point
cloud to a surface mesh. Note that only the bottom half is
converted to a mesh.
>>> import pyvista as pv
>>> points = pv.PolyData(pv.Sphere().points)
>>> mesh = points.delaunay_2d()
>>> mesh.is_all_triangles()
True
"""
alg = _vtk.vtkDelaunay2D()
alg.SetProjectionPlaneMode(_vtk.VTK_BEST_FITTING_PLANE)
alg.SetInputDataObject(poly_data)
alg.SetTolerance(tol)
alg.SetAlpha(alpha)
alg.SetOffset(offset)
alg.SetBoundingTriangulation(bound)
if edge_source is not None:
alg.SetSourceData(edge_source)
_update_alg(alg, progress_bar, 'Computing 2D Triangulation')
# Sometimes lines are given in the output. The
# `.triangulate()` filter cleans those
mesh = _get_output(alg).triangulate()
if inplace:
poly_data.overwrite(mesh)
return poly_data
else:
return mesh
def compute_arc_length(poly_data):
"""Compute the arc length over the length of the probed line.
It adds a new point-data array named ``"arc_length"`` with the
computed arc length for each of the polylines in the
input. For all other cell types, the arc length is set to 0.
Returns
-------
output : pyvista.PolyData
Mesh with the computed ``"arc_length"`` point array.
Examples
--------
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> path = sphere.geodesic(0, 100)
>>> length = path.compute_arc_length()['arc_length'][-1]
>>> print(f'Length is {length:.3f}')
Length is 0.812
This is identical to the geodesic_distance.
>>> length = sphere.geodesic_distance(0, 100)
>>> print(f'Length is {length:.3f}')
Length is 0.812
You can also plot the arc_length
>>> arc = path.compute_arc_length()
>>> cpos = arc.plot(scalars="arc_length")
"""
alg = _vtk.vtkAppendArcLength()
alg.SetInputData(poly_data)
alg.Update()
return _get_output(alg)
def project_points_to_plane(poly_data, origin=None, normal=(0, 0, 1),
inplace=False):
"""Project points of this mesh to a plane.
Parameters
----------
origin : np.ndarray or collections.abc.Sequence, optional
Plane origin. Defaults to the approximate center of the
input mesh minus half the length of the input mesh in the
direction of the normal.
normal : np.ndarray or collections.abc.Sequence, optional
Plane normal. Defaults to +Z ``[0, 0, 1]``
inplace : bool, optional
Overwrite the original mesh with the projected points
Examples
--------
Flatten a sphere to the XY plane
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> projected = sphere.project_points_to_plane([0, 0, 0])
"""
if not isinstance(normal, (np.ndarray, collections.abc.Sequence)) or len(normal) != 3:
raise TypeError('Normal must be a length three vector')
if origin is None:
origin = np.array(poly_data.center) - np.array(normal)*poly_data.length/2.
# choose what mesh to use
if not inplace:
mesh = poly_data.copy()
else:
mesh = poly_data
# Make plane
plane = generate_plane(normal, origin)
# Perform projection in place on the copied mesh
f = lambda p: plane.ProjectPoint(p, p)
np.apply_along_axis(f, 1, mesh.points)
return mesh
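# Hedged sketch (illustration only, not from the original source): overriding
# the default ``origin``/``normal`` described above to flatten a mesh onto a
# custom plane instead of the default XY projection.
#
#   >>> import pyvista as pv
#   >>> sphere = pv.Sphere()
#   >>> flat = sphere.project_points_to_plane(origin=(0, 0.5, 0), normal=(0, 1, 0))
#   >>> # every projected point now lies (approximately) on the plane y == 0.5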
def ribbon(poly_data, width=None, scalars=None, angle=0.0, factor=2.0,
normal=None, tcoords=False, preference='points'):
"""Create a ribbon of the lines in this dataset.
Parameters
----------
    width : float, optional
        Set the "half" width of the ribbon. If the width is
        allowed to vary, this is the minimum width. The default is
        10% of the length of the input dataset.
    scalars : str, optional
        String name of the scalars array used to vary the ribbon
        width. When set, the width varies with the scalar values,
        up to ``factor`` times the minimum width.
angle : float, optional
Angle in degrees of the offset angle of the ribbon from
the line normal. The default is 0.0.
    factor : float, optional
        Set the maximum ribbon width in terms of a multiple of the
        minimum width. The default is 2.0.
normal : tuple(float), optional
Normal to use as default.
tcoords : bool, str, optional
If ``True``, generate texture coordinates along the
ribbon. This can also be specified to generate the texture
coordinates with either ``'length'`` or ``'normalized'``.
Examples
--------
Convert a line to a ribbon and plot it.
>>> import pyvista as pv
>>> sphere = pv.Sphere()
>>> path = sphere.geodesic(0, 100)
>>> ribbon = path.ribbon()
>>> cpos = pv.plot([sphere, ribbon])
Notes
-----
If there are no lines in the input dataset, then the output
will be an empty ``pyvista.PolyData`` mesh.
"""
if scalars is not None:
arr, field = get_array(poly_data, scalars, preference=preference, info=True)
if width is None:
width = poly_data.length * 0.1
alg = _vtk.vtkRibbonFilter()
alg.SetInputDataObject(poly_data)
alg.SetWidth(width)
if normal is not None:
alg.SetUseDefaultNormal(True)
alg.SetDefaultNormal(normal)
alg.SetAngle(angle)
if scalars is not None:
alg.SetVaryWidth(True)
alg.SetInputArrayToProcess(0, 0, 0, field.value, scalars) # args: (idx, port, connection, field, name)
alg.SetWidthFactor(factor)
else:
alg.SetVaryWidth(False)
if tcoords:
alg.SetGenerateTCoords(True)
if isinstance(tcoords, str):
if tcoords.lower() == 'length':
alg.SetGenerateTCoordsToUseLength()
elif tcoords.lower() == 'normalized':
alg.SetGenerateTCoordsToNormalizedLength()
else:
alg.SetGenerateTCoordsToUseLength()
else:
alg.SetGenerateTCoordsToOff()
alg.Update()
return _get_output(alg)
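# Hedged sketch (illustration only): varying the ribbon width with a point
# scalar array, as described by the ``scalars`` and ``factor`` parameters
# above.  The array name ``"distance"`` is made up for this example.
#
#   >>> import numpy as np
#   >>> import pyvista as pv
#   >>> path = pv.Sphere().geodesic(0, 100)
#   >>> path["distance"] = np.linspace(0.0, 1.0, path.n_points)
#   >>> varying = path.ribbon(width=0.01, scalars="distance", factor=5.0)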
def extrude(poly_data, vector, inplace=False, progress_bar=False):
"""Sweep polygonal data creating a "skirt" from free edges.
    Vertices in the input are swept into lines.
This takes polygonal data as input and generates polygonal
data on output. The input dataset is swept according to some
extrusion function and creates new polygonal primitives. These
primitives form a "skirt" or swept surface. For example,
sweeping a line results in a quadrilateral, and sweeping a
triangle creates a "wedge".
There are a number of control parameters for this filter. You
can control whether the sweep of a 2D object (i.e., polygon or
triangle strip) is capped with the generating geometry via the
"Capping" parameter.
The skirt is generated by locating certain topological
features. Free edges (edges of polygons or triangle strips
    only used by one polygon or triangle strip) generate
surfaces. This is true also of lines or polylines. Vertices
generate lines.
This filter can be used to create 3D fonts, 3D irregular bar
charts, or to model 2 1/2D objects like punched plates. It
also can be used to create solid objects from 2D polygonal
meshes.
Parameters
----------
mesh : pyvista.PolyData
Mesh to extrude.
vector : np.ndarray or list
Direction and length to extrude the mesh in.
inplace : bool, optional
Overwrites the original mesh in-place.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Examples
--------
    Extrude a half circle arc.
>>> import pyvista
>>> arc = pyvista.CircularArc([-1, 0, 0], [1, 0, 0], [0, 0, 0])
>>> mesh = arc.extrude([0, 0, 1])
>>> cpos = mesh.plot()
"""
alg = _vtk.vtkLinearExtrusionFilter()
alg.SetExtrusionTypeToVectorExtrusion()
alg.SetVector(*vector)
alg.SetInputData(poly_data)
_update_alg(alg, progress_bar, 'Extruding')
output = pyvista.wrap(alg.GetOutput())
if inplace:
poly_data.overwrite(output)
return poly_data
else:
return output
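# Hedged sketch (illustration only): as the docstring above notes, sweeping a
# single line segment produces a quadrilateral "skirt".
#
#   >>> import pyvista as pv
#   >>> line = pv.Line((0, 0, 0), (1, 0, 0))
#   >>> quad = line.extrude([0, 0, 1])    # one quad spanning the swept edge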
def extrude_rotate(poly_data, resolution=30, inplace=False,
translation=0.0, dradius=0.0, angle=360.0, progress_bar=False):
"""Sweep polygonal data creating "skirt" from free edges and lines, and lines from vertices.
This is a modeling filter.
This takes polygonal data as input and generates polygonal
data on output. The input dataset is swept around the z-axis
to create new polygonal primitives. These primitives form a
"skirt" or swept surface. For example, sweeping a line
results in a cylindrical shell, and sweeping a circle
creates a torus.
There are a number of control parameters for this filter.
You can control whether the sweep of a 2D object (i.e.,
polygon or triangle strip) is capped with the generating
geometry via the "Capping" instance variable. Also, you can
control the angle of rotation, and whether translation along
the z-axis is performed along with the rotation.
(Translation is useful for creating "springs".) You also can
adjust the radius of the generating geometry using the
"DeltaRotation" instance variable.
The skirt is generated by locating certain topological
features. Free edges (edges of polygons or triangle strips
    only used by one polygon or triangle strip) generate
surfaces. This is true also of lines or polylines. Vertices
generate lines.
This filter can be used to model axisymmetric objects like
cylinders, bottles, and wine glasses; or translational/
rotational symmetric objects like springs or corkscrews.
Parameters
----------
resolution : int, optional
Number of pieces to divide line into.
inplace : bool, optional
Overwrites the original mesh inplace.
translation : float, optional
Total amount of translation along the z-axis.
dradius : float, optional
Change in radius during sweep process.
angle : float, optional
The angle of rotation.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Examples
--------
>>> import pyvista
>>> line = pyvista.Line(pointa=(0, 0, 0), pointb=(1, 0, 0))
>>> mesh = line.extrude_rotate(resolution = 4)
>>> cpos = mesh.plot()
"""
if resolution <= 0:
raise ValueError('`resolution` should be positive')
alg = _vtk.vtkRotationalExtrusionFilter()
alg.SetInputData(poly_data)
alg.SetResolution(resolution)
alg.SetTranslation(translation)
alg.SetDeltaRadius(dradius)
alg.SetAngle(angle)
_update_alg(alg, progress_bar, 'Extruding')
output = pyvista.wrap(alg.GetOutput())
if inplace:
poly_data.overwrite(output)
return poly_data
else:
return output
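# Hedged sketch (illustration only): combining ``angle`` and ``translation``
# as described above to sweep a small circular profile into a spring-like
# coil.  The profile dimensions are arbitrary choices for the example.
#
#   >>> import pyvista as pv
#   >>> profile = pv.Polygon(center=(1.0, 0.0, 0.0), radius=0.1,
#   ...                      normal=(0, 1, 0), n_sides=30)
#   >>> coil = profile.extrude_rotate(resolution=360, translation=2.0,
#   ...                               angle=1080.0)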
def strip(poly_data, join=False, max_length=1000, pass_cell_data=False,
pass_cell_ids=False, pass_point_ids=False):
"""Strip poly data cells.
Generates triangle strips and/or poly-lines from input
polygons, triangle strips, and lines.
Polygons are assembled into triangle strips only if they are
triangles; other types of polygons are passed through to the
output and not stripped. (Use ``triangulate`` filter to
triangulate non-triangular polygons prior to running this
filter if you need to strip all the data.) The filter will
pass through (to the output) vertices if they are present in
the input polydata.
Also note that if triangle strips or polylines are defined in
the input they are passed through and not joined nor
extended. (If you wish to strip these use ``triangulate``
filter to fragment the input into triangles and lines prior to
running this filter.)
Parameters
----------
join : bool, optional
If ``True``, the output polygonal segments will be joined
if they are contiguous. This is useful after slicing a
surface. The default is ``False``.
max_length : int, optional
Specify the maximum number of triangles in a triangle
strip, and/or the maximum number of lines in a poly-line.
pass_cell_data : bool, optional
Enable/Disable passing of the CellData in the input to the
output as FieldData. Note the field data is transformed.
Default is ``False``.
pass_cell_ids : bool, optional
If ``True``, the output polygonal dataset will have a
celldata array that holds the cell index of the original
3D cell that produced each output cell. This is useful for
picking. The default is ``False`` to conserve memory.
pass_point_ids : bool, optional
If ``True``, the output polygonal dataset will have a
pointdata array that holds the point index of the original
vertex that produced each output vertex. This is useful
for picking. The default is ``False`` to conserve memory.
Examples
--------
>>> from pyvista import examples
>>> mesh = examples.load_airplane()
>>> slc = mesh.slice(normal='z', origin=(0,0,-10))
>>> stripped = slc.strip()
>>> stripped.n_cells
1
"""
alg = _vtk.vtkStripper()
alg.SetInputDataObject(poly_data)
alg.SetJoinContiguousSegments(join)
alg.SetMaximumLength(max_length)
alg.SetPassCellDataAsFieldData(pass_cell_data)
alg.SetPassThroughCellIds(pass_cell_ids)
alg.SetPassThroughPointIds(pass_point_ids)
alg.Update()
return _get_output(alg)
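# Hedged sketch (illustration only): ``join=True`` merges contiguous polyline
# segments after slicing, as mentioned in the parameter description above.
#
#   >>> from pyvista import examples
#   >>> slc = examples.load_airplane().slice(normal='z', origin=(0, 0, -10))
#   >>> joined = slc.strip(join=True)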
| rohankumardubey/pyvista | pyvista/core/filters/poly_data.py | poly_data.py | py | 77,050 | python | en | code | 0 | github-code | 6 | 15835641511 |
from config import db
class PricePerHour(db.Model):
id = db.Column(db.Integer, primary_key=True)
date_of_parsing = db.Column(db.String(10), nullable=False)
hour = db.Column(db.Integer, nullable=False)
price = db.Column(db.Float, nullable=False)
sales_volume_MWh = db.Column(db.Float, nullable=False)
purchase_volume_MWh = db.Column(db.Float, nullable=False)
declared_sales_volume_MWh = db.Column(db.Float, nullable=False)
declared_purchase_volume_MWh = db.Column(db.Float, nullable=False)
def to_dict(self):
return {
"id": self.id,
"date_of_parsing": self.date_of_parsing,
"hour": self.hour,
"price": self.price,
"sales_volume_MWh": self.sales_volume_MWh,
"purchase_volume_MWh": self.purchase_volume_MWh,
"declared_sales_volume_MWh": self.declared_sales_volume_MWh,
"declared_purchase_volume_MWh": self.declared_purchase_volume_MWh,
}
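# Hedged sketch (not part of the original models module): a hypothetical Flask
# route that serializes rows with ``to_dict``.  The ``app`` import and the
# route path are assumptions; only the model above comes from the source.
#
#   from flask import jsonify
#   from config import app          # assumed to live next to ``db``
#
#   @app.route("/prices/<date>")
#   def prices_for_date(date):
#       rows = PricePerHour.query.filter_by(date_of_parsing=date).all()
#       return jsonify([row.to_dict() for row in rows])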
| BohdanLazaryshyn/rdn_test_task | models.py | models.py | py | 986 | python | en | code | 0 | github-code | 6 | 70065264188 |
'Program to create the Functional Requirement Classifier model and validate it'
from fileProcess import FileProcess
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.feature_extraction.text import TfidfTransformer
def build_data_frame(path, classification):
rows = []
index = []
fp = FileProcess()
for file_name, text in fp.read_files(path):
rows.append({'text': text, 'class': classification})
index.append(file_name)
data_frame = DataFrame(rows, index=index)
return data_frame
# Main
data = DataFrame({'text': [], 'class': []})
for path, classification in FileProcess.SOURCES:
data = data.append(build_data_frame(path, classification))
data = data.reindex(numpy.random.permutation(data.index))
pipeline = Pipeline([
#('count_vectorizer', CountVectorizer(ngram_range=(1, 2))),
('count_vectorizer', CountVectorizer()),
# ('tfidf_transformer', TfidfTransformer()),
('classifier', MultinomialNB())
])
k_fold = KFold(n=len(data), n_folds=10)
scores = []
confusion = numpy.array([[0, 0], [0, 0]])
for train_indices, test_indices in k_fold:
train_text = data.iloc[train_indices]['text'].values
train_y = data.iloc[train_indices]['class'].values.astype(str)
test_text = data.iloc[test_indices]['text'].values
test_y = data.iloc[test_indices]['class'].values.astype(str)
pipeline.fit(train_text, train_y)
predictions = pipeline.predict(test_text)
print("******************* predictions*********")
# print(predictions)
confusion += confusion_matrix(test_y, predictions)
score = f1_score(test_y, predictions, pos_label=FileProcess.FRN)
scores.append(score)
for i in range(0, len(predictions)) :
if predictions[i] != test_y[i] :
print("********text is \n" + test_text[i])
print("The wrong clf is: " + predictions[i])
print("*******************")
print('Total files classified:', len(data))
print('Score:', sum(scores)/len(scores))
print('Confusion matrix:')
print(confusion)
print("++++++++++++ vocabulary from the documents ++++++++++=")
vector = pipeline.named_steps['count_vectorizer']
#print(vector.vocabulary_)
| xiejen/rfpFunctionReqClf | classifier.py | classifier.py | py | 2,435 | python | en | code | 0 | github-code | 6 | 37300539440 |
import csv
def add_topic_to_csv(url):
try:
id_start_index = url.find('/topics/') + len('/topics/')
id_end_index = url.find('?')
curid = url[id_start_index:id_end_index]
topic_name_start_index = url.rfind('/') + 1
topic_name = url[topic_name_start_index:id_start_index -
len('/topics/')]
with open('topic.csv', 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([topic_name, curid])
print(f"Added topic {topic_name} to the CSV file.")
except Exception as e:
print(f"Error occurred while processing URL: {str(e)}")
| Lucascuibu/xis_topic_py | topic_grab/add_topic.py | add_topic.py | py | 652 | python | en | code | 0 | github-code | 6 | 29947747353 |
"""
Tesla Crystal for Tornado wallet
"""
import sys
import time
from os import path
from modules.basehandlers import CrystalHandler
from modules.i18n import get_dt_language
from modules.helpers import base_path
from modules.helpers import async_get_with_http_fallback
sys.path.append('crystals/420_tesla')
from bismuthsimpleasset import BismuthSimpleAsset
from teslaapihandler import TeslaAPIHandler
DEFAULT_THEME_PATH = path.join(base_path(), "crystals/420_tesla/themes/default")
MODULES = {}
__version__ = "1.0.0"
class TeslaHandler(CrystalHandler):
def initialize(self):
# Parent init
super().initialize()
data = ""
self.bismuth_vars["extra"] = {
"header": "<!-- TESLA HEADER -->",
"footer": data,
}
reg = "tesla:register"
unreg = "tesla:unregister"
transfer = "tesla:transfer"
op_data = "tesla:battery"
self.teslahandler = TeslaAPIHandler(self.bismuth,reg,unreg,op_data)
address = "Bis1TeSLaWhTC2ByEwZnYWtsPVK5428uqnL46"
thresholds = {"reg": 25}
checkfunc = {"f": self.teslahandler.checkID}
self.assethandler = BismuthSimpleAsset(self.bismuth,address,reg,unreg,transfer,thresholds,checkfunc)
async def message_popup(self, params=None):
title = self.get_argument("title", default=None, strip=False)
message = self.get_argument("msg", default=None, strip=False)
type = self.get_argument("type", default=None, strip=False)
self.render("message_pop.html", bismuth=self.bismuth_vars, title=title, message=message, type=type)
async def about(self, params=None):
namespace = self.get_template_namespace()
self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
kwargs = {"bismuth": self.bismuth_vars}
namespace.update(kwargs)
self.render("about.html", bismuth=self.bismuth_vars)
async def fetch_asset_id(self, params=None):
""""
Fetch asset ID associated with email address. pwd is the vehicle anonymizer
"""
email = self.get_argument("email", default=None, strip=False)
pwd = self.get_argument("pwd", default=None, strip=False) #For XOR
data = self.teslahandler.tesla_vins(email, pwd)
time.sleep(1)
self.render("json.html", data=data)
async def fetch_api_data(self, params=None):
"""
Returns a dict with vehicle data for all VINs associated with email and anonymizer
"""
email = self.get_argument("email", default=None, strip=False)
pwd = self.get_argument("pwd", default=None, strip=False) #For XOR
out = self.teslahandler.fetch_vehicle_data(email,pwd)
self.render("json.html", data=out)
async def check_vin_registrant(self, params=None):
"""
Returns registrant given asset id (vin number in vin_input)
"""
vin = self.get_argument("vin_input", default=None, strip=False)
# First check if this is a valid VIN
data = self.teslahandler.checkVIN(vin)
if data != -1:
# Second check if active wallet address is registrant
data = -1
registrant = self.assethandler.get_registrant(vin)
if registrant == self.bismuth.address:
data = 1
self.render("json.html", data=registrant)
async def check_vin_register(self, params=None):
"""
Checks if an asset id (VIN number) is valid and registered
"""
vin = self.get_argument("vin_input", default=None, strip=False)
# First check if this is a valid VIN
data = self.teslahandler.checkID(vin)
if data != -1:
# Second check if VIN is already registered
registrant = self.assethandler.get_registrant(vin)
if len(registrant) > 0:
data = -1
self.render("json.html", data=data)
async def check_vin_unregister(self, params=None):
"""
Unregisters VIN if valid and current address has previously registered it
"""
vin = self.get_argument("vin_input", default=None, strip=False)
# First check if this is a valid VIN
data = self.teslahandler.checkID(vin)
if data != -1:
# Second check if this account has registered this VIN
registrant = self.assethandler.get_registrant(vin)
if registrant != self.bismuth.address:
data = -1
self.render("json.html", data=data)
async def get_chain_data(self, params=None):
"""
Returns vehicle data as specified by 'variable' between start and end dates
Used for displaying data by DataTable and ChartJS
"""
vin = self.get_argument("vin", default=None, strip=False)
addresses = self.get_argument("address", default=None, strip=False)
variable = self.get_argument("variable", default=None, strip=False)
filter = self.get_argument("filter", default=None, strip=False)
range_unit = self.get_argument("range", default=None, strip=False)
temperature = self.get_argument("temperature", default=None, strip=False)
startdate = self.get_argument("startdate", default=None, strip=False)
enddate = self.get_argument("enddate", default=None, strip=False)
if variable == "battery_cycles":
out = self.teslahandler.get_cycle_data(vin,addresses,"battery_level",filter,range_unit,temperature,startdate,enddate)
else:
out = self.teslahandler.get_chain_data(vin,addresses,variable,filter,range_unit,temperature,startdate,enddate)
self.render("json.html", data=out)
async def get_all_asset_ids(self, params=None):
asset_search = self.get_argument("asset_search", default=None, strip=False)
out = self.assethandler.get_all_asset_ids(asset_search)
self.render("json.html", data=out)
async def page1(self, params=None):
namespace = self.get_template_namespace()
self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
kwargs = {"bismuth": self.bismuth_vars}
namespace.update(kwargs)
self.render("page1.html", bismuth=self.bismuth_vars)
async def page2(self, params=None):
namespace = self.get_template_namespace()
self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
kwargs = {"bismuth": self.bismuth_vars}
namespace.update(kwargs)
self.render("page2.html", bismuth=self.bismuth_vars)
async def page3(self, params=None):
namespace = self.get_template_namespace()
self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
kwargs = {"bismuth": self.bismuth_vars}
namespace.update(kwargs)
self.render("page3.html", bismuth=self.bismuth_vars)
async def get(self, command=""):
command, *params = command.split("/")
if not command:
command = "about"
await getattr(self, command)(params)
async def post(self, command=""):
command, *params = command.split("/")
if not command:
command = "about"
await getattr(self, command)(params)
def get_template_path(self):
"""Override to customize template path for each handler."""
return DEFAULT_THEME_PATH
def static(self):
"""Defining this method will automagically create a static handler pointing to local /static crystal dir"""
pass
| bismuthfoundation/TornadoWallet | wallet/crystals/420_tesla/__init__.py | __init__.py | py | 7,537 | python | en | code | 14 | github-code | 6 | 17508342693 |
from typing import List
class Solution:
    def maxArea(self, height: List[int]) -> int:
        # Two-pointer technique: start with the widest container and always
        # move the shorter side inward, because moving the taller side can
        # only shrink the area.
        i, j = 0, len(height) - 1
        im, jm, mx = 0, 0, 0  # im/jm track the indices of the best pair seen
        while i < j:
            val = (j - i) * min(height[i], height[j])
            if val > mx:
                im, jm, mx = i, j, val
            if height[i] < height[j]:
                i += 1
            else:
                j -= 1
        return mx
print(Solution().maxArea([1,8,6,2,5,4,8,3,7]))
| soji-omiwade/cs | dsa/before_rubrik/container_with_most_water.py | container_with_most_water.py | py | 426 | python | en | code | 0 | github-code | 6 | 73879522109 |
# -*- coding: utf-8 -*-
import requests
import pandas as pd
import pytest
import urllib
import pprint
# Exercise 1
def get_api(url):
result = requests.get(url)
return result.json()
def main():
keyword = "鬼滅"
url = "https://app.rakuten.co.jp/services/api/IchibaItem/Search/20170706?format=json&keyword={}&applicationId=1019079537947262807".format(
keyword)
print(get_api(url))
main()
# Exercise 2
url = 'https://app.rakuten.co.jp/services/api/IchibaItem/Search/20170706'
payload = {
'applicationId': 1017762098426453356,
'keyword': 'Python',
'hits': 10,
'sort': '+itemPrice',
}
r = requests.get(url, params=payload)
resp = r.json()
pprint.pprint(resp)
print ("num of kensaku =",resp['count'])
print ('-'*40)
for i in resp['Items']:
item = i['Item']
print (item['itemName'])
print (item['itemPrice'], 'yen')
# Exercise 3
url = 'https://app.rakuten.co.jp/services/api/Product/Search/20170426'
payload = {
'applicationId': 1017762098426453356,
'keyword': 'rakuten',
'hits': 10,
'genreId': 560278,
}
r = requests.get(url, params=payload)
resp = r.json()
a=[]
b=[]
for i in resp['Products']:
item = i['Product']
a.append(item['minPrice'])
b.append(item['maxPrice'])
print (item['minPrice'], 'yen')
print(item['maxPrice'], 'yen')
print("最安値は、", min(a), "円です。")
print("最高値は、", max(b), "円です。")
# Exercise 4
url = 'https://app.rakuten.co.jp/services/api/IchibaItem/Search/20140222'
payload = {
'applicationId': 1017762098426453356,
'keyword': 'Python',
'hits': 10,
'sort': '-itemPrice',
'rankTargetProductCount':30485
}
r = requests.get(url, params=payload)
resp = r.json()
print ("num of kensaku =",resp['count'])
print ('-'*40)
a=[]
b=[]
for i in resp['Items']:
item = i['Item']
a.append(item['itemName'])
b.append(item['itemPrice'])
print (item['itemName'])
print (item['itemPrice'], 'yen')
print(len(a), len(b))
df = pd.DataFrame({"Items":a,
"Prices":b})
df.to_csv("/Users/ishikawakanji/Desktop/kadai6/item.csv", encoding="utf-8-sig")
# Exercise 5
def test_get_rakutenAPI():
    # Check the item names collected in Exercise 4: each one must be non-empty.
    price_list = a
    for i in price_list:
        print(i)
        assert len(i) >= 1
    assert price_list[0].title()
| KanjiIshikawa-lab/Kadai6syuusei | kadai6_4.py | kadai6_4.py | py | 2,333 | python | en | code | 0 | github-code | 6 | 39174277833 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from random import seed
from dataset import MNIST
from network import FeedForward
from train_mnist import Train, TrainConfig
from plotter import Plotter
np.random.seed(1234)
seed(1234)
torch.manual_seed(1234)
if '__main__' == __name__:
data = dict()
data['train'] = MNIST('./dataset', train=True, download=True, randomize=False)
data['test'] = MNIST('./dataset', train=False)
loader = dict()
loader['train'] = torch.utils.data.DataLoader(data['train'], batch_size=60000, shuffle=False)
loader['test'] = torch.utils.data.DataLoader(data['test'], batch_size=10000, shuffle=False)
# setup
input_size = 28 * 28
output_size = 10
hidden_sizes = [784, 1024, 1024, 20, 20, 20, 10]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'to device: {device}')
net = FeedForward(input_size, hidden_sizes, output_size).to(device)
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
cfg = TrainConfig(net, criterion, optimizer)
train = Train(cfg)
train.epochs = 4000
train.mi_cycle = 20
train.run(loader)
train.dump()
plot = Plotter(train)
plot.plot_losses()
plot.plot_accuracy()
plot.plot_info_plan('train')
plot.plot_info_plan('test')
| shalomma/PytorchBottleneck | ib_mnist.py | ib_mnist.py | py | 1,394 | python | en | code | 7 | github-code | 6 | 8267950596 |
from __future__ import annotations
import socket
import pytest
from kombu import Connection, Consumer, Exchange, Producer, Queue
class test_PyroTransport:
def setup(self):
self.c = Connection(transport='pyro', virtual_host="kombu.broker")
self.e = Exchange('test_transport_pyro')
self.q = Queue('test_transport_pyro',
exchange=self.e,
routing_key='test_transport_pyro')
self.q2 = Queue('test_transport_pyro2',
exchange=self.e,
routing_key='test_transport_pyro2')
self.fanout = Exchange('test_transport_pyro_fanout', type='fanout')
self.q3 = Queue('test_transport_pyro_fanout1',
exchange=self.fanout)
self.q4 = Queue('test_transport_pyro_fanout2',
exchange=self.fanout)
def test_driver_version(self):
assert self.c.transport.driver_version()
@pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
def test_produce_consume_noack(self):
channel = self.c.channel()
producer = Producer(channel, self.e)
consumer = Consumer(channel, self.q, no_ack=True)
for i in range(10):
producer.publish({'foo': i}, routing_key='test_transport_pyro')
_received = []
def callback(message_data, message):
_received.append(message)
consumer.register_callback(callback)
consumer.consume()
while 1:
if len(_received) == 10:
break
self.c.drain_events()
assert len(_received) == 10
def test_drain_events(self):
with pytest.raises(socket.timeout):
self.c.drain_events(timeout=0.1)
c1 = self.c.channel()
c2 = self.c.channel()
with pytest.raises(socket.timeout):
self.c.drain_events(timeout=0.1)
del c1 # so pyflakes doesn't complain.
del c2
@pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
def test_drain_events_unregistered_queue(self):
c1 = self.c.channel()
producer = self.c.Producer()
consumer = self.c.Consumer([self.q2])
producer.publish(
{'hello': 'world'},
declare=consumer.queues,
routing_key=self.q2.routing_key,
exchange=self.q2.exchange,
)
message = consumer.queues[0].get()._raw
class Cycle:
def get(self, callback, timeout=None):
return (message, 'foo'), c1
self.c.transport.cycle = Cycle()
self.c.drain_events()
@pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
def test_queue_for(self):
chan = self.c.channel()
x = chan._queue_for('foo')
assert x
assert chan._queue_for('foo') is x
| celery/kombu | t/unit/transport/test_pyro.py | test_pyro.py | py | 2,892 | python | en | code | 2,643 | github-code | 6 | 26682571553 |
import sys
import os
import re
import logging
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QComboBox, QPushButton, QLabel, QFileDialog, QMainWindow, QMessageBox, QCheckBox
from gui import Ui_MainWindow
from function import *
project_file = ".project"
cproject_file = ".cproject"
recovery_file_sufix = "_old"
HAL_project = "STM32Cube HAL Driver"
Base_project = "Base CMSIS Driver"
EDF_PATH_VAR = "EDF_PATH"
device_cortex_series = {
'STM32F0': 'M0',
'STM32C0': 'M0+',
'STM32L0': 'M0+',
'STM32G0': 'M0+',
'STM32F1': 'M3',
'STM32F2': 'M3',
'STM32L1': 'M3',
'STM32F3': 'M4',
'STM32F4': 'M4',
'STM32G4': 'M4',
'STM32L4': 'M4',
'STM32F7': 'M7',
    'STM32H7': 'M7',
}
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s: %(message)s')
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
class installer_app(Ui_MainWindow, QWidget):
def __init__(self, window, app):
self._window = window
self._app = app
super().__init__()
self.setupUi(self._window)
logging.info("Setup tools statting up.")
# Add widget event handler.
self.Btn_Browser.clicked.connect(self.onButtonBrowserClicked)
self.Btn_Setup.clicked.connect(self.onButtonSetupClicked)
self.Btn_Restore.clicked.connect(self.onButtonRestoreClicked)
self.Btn_Quit.clicked.connect(self.onButtonQuitClicked)
self.Enable_Widget(False)
# Get EDF_PATH environment variable, exit if EDF_PATH is not define.
self._edf_path = get_EDF_PATH()
if self._edf_path == None:
sys.exit()
# Check argument.
if len(sys.argv) > 1:
arg_project_dir = sys.argv[1]
if arg_project_dir:
self.onButtonBrowserClicked(arg_project_dir)
# Show installer application.
self._window.show()
sys.exit(self._app.exec_())
# Enable/disable widget function.
def Enable_Widget(self, en):
self.Btn_Setup.setDisabled(not en)
self.Btn_Restore.setDisabled(not en)
self.Box_Optimize.setDisabled(not en)
self.CB_Printf_Float.setDisabled(not en)
self.CB_Scanf_Float.setDisabled(not en)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Quit button clickec handler.
def onButtonQuitClicked(self):
sys.exit()
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Browser button clicked handler.
def onButtonBrowserClicked(self, arg):
# Get project directory in line edit.
if arg:
self._project_dir = arg
else:
self._project_dir = QFileDialog.getExistingDirectory(self, "Project browser", os.path.expanduser("~"), QFileDialog.ShowDirsOnly)
if self._project_dir:
self.LE_Project_Dir.setText(self._project_dir) # Set show directory.
self._project_file_dir = self._project_dir + "/" + project_file
self._cproject_file_dir = self._project_dir + "/" + cproject_file
# Get project background.
self._project_bgr = get_project_background(self._cproject_file_dir)
self.Btn_Project_Bgr.setText(self._project_bgr)
# Get project name.
self._project_name = get_project_name(self._project_file_dir)
if self._project_name != None:
# Get .ioc file name.
if self._project_bgr == HAL_project:
self._ioc_file_dir = self._project_dir + "/" + self._project_name + ".ioc"
else:
logging.error("Can't get project name.")
make_message_box(QMessageBox.Critical, "Error", "Can't get project name.")
return
# Get device full name in .cproject file.
self._device_full_name = get_file_target_name(self._cproject_file_dir)
# Get device series.
self._device_family_series = self._device_full_name[:7]
# Get device cortex series.
self._device_cortex_series = device_cortex_series[self._device_family_series]
# Get project install state.
if get_install_state(self._cproject_file_dir) != True:
self.Btn_Setup.setDisabled(True)
else:
self.Btn_Setup.setDisabled(False)
if self._device_full_name == None:
self.Btn_Device_Full_Name.setText("STM32xxxxxxxxx")
self.Btn_Device_Series.setText("Unknown")
make_message_box(QMessageBox.Critical, "Error", "Unknown STM32 device name.")
logging.error("Unknown device name in project.")
self.Enable_Widget(False)
return
else:
self.Btn_Device_Full_Name.setText(self._device_full_name)
self.Btn_Device_Series.setText(self._device_full_name[:7] + f'(Cortex-{self._device_cortex_series})')
self.Enable_Widget(True)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Setup button handler.
def onButtonSetupClicked(self):
logging.info(f"Project Information:")
logging.info(f"\tDirectory: {self._project_dir}")
logging.info(f"\tName: {self._project_name}")
logging.info(f"\tDevice: {self._device_full_name}")
logging.info(f"\tSeries: {self._device_family_series}")
logging.info(f"\Core: {self._device_cortex_series}")
# Create recovery file.
copy_file(self._cproject_file_dir, self._cproject_file_dir + recovery_file_sufix, False)
copy_file(self._project_file_dir, self._project_file_dir + recovery_file_sufix, False)
if self._project_bgr == HAL_project:
copy_file(self._ioc_file_dir, self._ioc_file_dir + recovery_file_sufix, False)
# Get build option optimize.
sel_opt = self.Box_Optimize.currentText()
start_index = sel_opt.find("(") + 1
end_index = sel_opt.find(")", start_index)
self.build_optimize_level = sel_opt[start_index:end_index]
logging.info(f"\tOptimize level: {self.build_optimize_level}")
# Get option printf float.
if self.CB_Printf_Float.checkState() == 0:
self.printf_float = "false"
else:
self.printf_float = "true"
logging.info(f"\tPrintf float: {self.printf_float}")
# Get option scanf float.
if self.CB_Scanf_Float.checkState() == 0:
self.scanf_float = "false"
else:
self.scanf_float = "true"
logging.info(f"\tScanf float: {self.scanf_float}")
# Config .cproject file.
c_symbols_replace = "<listOptionValue builtIn=\"false\" value=\"DEBUG\"/>\n"
symbols_insert = ( "\n\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"DEBUG\"/>\n"
"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"_GNU_SOURCE\"/>\n"
"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"__STM32__\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"{self._device_family_series}\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"{self._device_full_name}\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"DEVICE_NAME="{self._device_full_name}"\"/>")
symbols_insert_HAL = ("\n\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"USE_HAL_DRIVER\"/>")
source_folder_replace = "<sourceEntries>"
source_folder_insert = ("\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"edf_core\"/>\n"
"\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"edf_rtos\"/>\n"
"\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"edf_middlewares\"/>\n"
"\t\t\t\t\t\t<entry flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" kind=\"sourcePath\" name=\"main\"/>")
c_include_path_replace = "name=\"Include paths (-I)\" superClass=\"com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.includepaths\" useByScannerDiscovery=\"false\" valueType=\"includePath\">"
cpp_include_path_replace = "name=\"Include paths (-I)\" superClass=\"com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.includepaths\" useByScannerDiscovery=\"false\" valueType=\"includePath\">"
include_path_insert = ( "\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\"../buildconfig\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\""${{{EDF_PATH_VAR}}}/components/core/include"\"/>\n"
f"\t\t\t\t\t\t\t\t\t<listOptionValue builtIn=\"false\" value=\""${{{EDF_PATH_VAR}}}/components/freertos/os_c{self._device_cortex_series.lower()}"\"/>\n")
c_optimize_level_replace = 'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level" useByScannerDiscovery="false"/>'
cpp_optimize_level_replace = 'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level" useByScannerDiscovery="false"/>'
c_optimize_level_pattern = r'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level" useByScannerDiscovery="false" value="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.tool\.c\.compiler\.option\.optimization\.level\.value\..*?\" valueType="enumerated"/>'
cpp_optimize_level_pattern = r'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level" useByScannerDiscovery="false" value="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.tool\.cpp\.compiler\.option\.optimization\.level\.value\..*?\" valueType="enumerated"/>'
c_optimize_level_insert = f'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level" useByScannerDiscovery="false" value="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.c.compiler.option.optimization.level.value.{self.build_optimize_level}\" valueType="enumerated"/>'
cpp_optimize_level_insert = f'name="Optimization level" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level" useByScannerDiscovery="false" value="com.st.stm32cube.ide.mcu.gnu.managedbuild.tool.cpp.compiler.option.optimization.level.value.{self.build_optimize_level}\" valueType="enumerated"/>'
printf_float_pattern = r'<option id="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoprintffloat\.\d+" name="Use float with printf from newlib-nano \(-u _printf_float\)" superClass="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoprintffloat" useByScannerDiscovery="false" value=".+" valueType="boolean"/>'
printf_float_replace = f'<option id="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoprintffloat.1816458521" name="Use float with printf from newlib-nano (-u _printf_float)" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoprintffloat" useByScannerDiscovery="false" value="{self.printf_float}" valueType="boolean"/>'
scanf_float_pattern = r'<option id="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoscanffloat\.\d+" superClass="com\.st\.stm32cube\.ide\.mcu\.gnu\.managedbuild\.option\.nanoscanffloat" value=".+" valueType="boolean"/>'
scanf_float_replace = f'<option id="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoscanffloat.653624109" superClass="com.st.stm32cube.ide.mcu.gnu.managedbuild.option.nanoscanffloat" value="{self.scanf_float}" valueType="boolean"/>'
try:
with open(self._cproject_file_dir, 'r') as f:
file_content = f.read()
new_file_content = file_content
                # Remove all existing symbols.
# pattern = re.compile(c_symbols_pattern, re.DOTALL)
# new_file_content = re.sub(pattern, r'\1\n\t\t\t\t\t\t\t\t\2', file_content)
# pattern = re.compile(cpp_symbols_pattern, re.DOTALL)
# new_file_content = re.sub(pattern, r'\1\n\t\t\t\t\t\t\t\t\2', new_file_content)
# Insert symbols
if self._project_bgr == HAL_project:
new_file_content = new_file_content.replace(c_symbols_replace, c_symbols_replace + symbols_insert + symbols_insert_HAL)
# new_file_content = new_file_content.replace(cpp_symbols_replace, cpp_symbols_replace + symbols_insert + symbols_insert_HAL)
else:
new_file_content = new_file_content.replace(c_symbols_replace, c_symbols_replace + symbols_insert)
# new_file_content = new_file_content.replace(cpp_symbols_replace, cpp_symbols_replace + symbols_insert)
# Insert c include path
new_file_content = new_file_content.replace(c_include_path_replace, c_include_path_replace + "\n" + include_path_insert)
# Insert cpp include path
new_file_content = new_file_content.replace(cpp_include_path_replace, cpp_include_path_replace + "\n" + include_path_insert)
# Insert source folder directory
new_file_content = new_file_content.replace(source_folder_replace, source_folder_replace + "\n" + source_folder_insert)
# Insert c optimize level
if new_file_content.find(c_optimize_level_replace) != -1:
new_file_content = new_file_content.replace(c_optimize_level_replace, c_optimize_level_insert)
else:
new_file_content = re.sub(c_optimize_level_pattern, c_optimize_level_insert, new_file_content, count=1)
# Insert cpp optimize level
if new_file_content.find(cpp_optimize_level_replace) != -1:
new_file_content = new_file_content.replace(cpp_optimize_level_replace, cpp_optimize_level_insert)
else:
new_file_content = re.sub(cpp_optimize_level_pattern, cpp_optimize_level_insert, new_file_content, count=1)
# Change printf float option.
new_file_content = find_and_replace_printf_or_scanf(new_file_content, printf_float_pattern, printf_float_replace)
# Change scanf float option.
new_file_content = find_and_replace_printf_or_scanf(new_file_content, scanf_float_pattern, scanf_float_replace)
with open(self._cproject_file_dir, 'w') as f:
f.write(new_file_content)
logging.info(f"Config .cproject file successful")
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", f"{self._cproject_file_dir}: No such file in directory.")
logging.error(f"{self._cproject_file_dir} -> No such file in directory.")
return
# Config .project file.
project_desciption_replace = "</projectDescription>"
project_desciption_insert = ("\t<linkedResources>\n"
"\t\t<link>\n"
"\t\t\t<name>edf_core</name>\n"
"\t\t\t<type>2</type>\n"
f"\t\t\t<location>{self._edf_path}/components/core/source</location>\n"
"\t\t</link>\n"
"\t\t<link>\n"
"\t\t\t<name>edf_rtos</name>\n"
"\t\t\t<type>2</type>\n"
f"\t\t\t<location>{self._edf_path}/components/freertos/os_c{self._device_cortex_series.lower()}</location>\n"
"\t\t</link>\n"
"\t\t<link>\n"
"\t\t\t<name>edf_middlewares</name>\n"
"\t\t\t<type>2</type>\n"
f"\t\t\t<location>{self._edf_path}/components/middlewares</location>\n"
"\t\t</link>\n"
"\t</linkedResources>\n"
"</projectDescription>")
linked_source_check = "<name>edf_core</name>"
try:
with open(self._project_file_dir, 'r') as f:
file_content = f.read()
if not (linked_source_check in file_content):
new_file_content = file_content.replace(project_desciption_replace, project_desciption_insert)
with open(self._project_file_dir, 'w') as file:
file.write(new_file_content)
logging.info(f"Config .project file successful")
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", f"{self._project_file_dir}: No such file in directory.")
logging.error(f"{self._project_file_dir} -> No such file in directory.")
return
# Config .ioc file.
if self._project_bgr == HAL_project:
with open(self._ioc_file_dir, 'r') as f:
file_content = f.read()
busfault_handler_replace = "NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
debugmon_handler_replace = "NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
hardfault_handler_replace = "NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
memmanage_handler_replace = "NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
nmi_handler_replace = "NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
pendsv_handler_replace = "NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
svcall_handler_replace = "NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
systick_handler_replace = "NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:true\:false\:true\:false"
usagefault_handler_replace = "NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:true\:false\:false\:false"
busfault_handler_insert = "NVIC.BusFault_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
debugmon_handler_insert = "NVIC.DebugMonitor_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
hardfault_handler_insert = "NVIC.HardFault_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
memmanage_handler_insert = "NVIC.MemoryManagement_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
nmi_handler_insert = "NVIC.NonMaskableInt_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
pendsv_handler_insert = "NVIC.PendSV_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
svcall_handler_insert = "NVIC.SVCall_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
systick_handler_insert = "NVIC.SysTick_IRQn=true\:15\:0\:false\:false\:false\:false\:false\:false"
usagefault_handler_insert = "NVIC.UsageFault_IRQn=true\:0\:0\:false\:false\:false\:false\:false\:false"
if busfault_handler_replace in file_content:
new_file_content = file_content.replace(busfault_handler_replace, busfault_handler_insert)
if debugmon_handler_replace in file_content:
new_file_content = new_file_content.replace(debugmon_handler_replace, debugmon_handler_insert)
if hardfault_handler_replace in file_content:
new_file_content = new_file_content.replace(hardfault_handler_replace, hardfault_handler_insert)
if memmanage_handler_replace in file_content:
new_file_content = new_file_content.replace(memmanage_handler_replace, memmanage_handler_insert)
if nmi_handler_replace in file_content:
new_file_content = new_file_content.replace(nmi_handler_replace, nmi_handler_insert)
if pendsv_handler_replace in file_content:
new_file_content = new_file_content.replace(pendsv_handler_replace, pendsv_handler_insert)
if svcall_handler_replace in file_content:
new_file_content = new_file_content.replace(svcall_handler_replace, svcall_handler_insert)
if systick_handler_replace in file_content:
new_file_content = new_file_content.replace(systick_handler_replace, systick_handler_insert)
if usagefault_handler_replace in file_content:
new_file_content = new_file_content.replace(usagefault_handler_replace, usagefault_handler_insert)
with open(self._ioc_file_dir, 'w') as f:
f.write(new_file_content)
logging.info(f"Config .ioc file successful")
# Add edf_main_application into file main.c.
if self._project_bgr == HAL_project:
main_file_path = os.path.join(self._project_dir, "Core/Src/main.c")
try:
with open(main_file_path, 'r') as f:
file_content = f.read()
                # Collect the STM32 HAL Driver init calls.
pattern = re.compile(r'/\*\s*Initialize all configured peripherals\s*\*/\n\s*(.*?)\n\s*/\*\s*USER CODE BEGIN 2\s*\*/', re.DOTALL)
match = pattern.search(file_content)
if match:
HAL_init_func = match.group(1)
if file_content.find("/* USER CODE BEGIN 0 */") != -1:
new_file_content = file_content.replace("/* USER CODE BEGIN 0 */",
"/* USER CODE BEGIN 0 */\n"
"void HAL_driver_init(void){\n"
f"\t{HAL_init_func}"
"\n}"
)
if file_content.find("main_application") == -1:
if file_content.find("/* USER CODE BEGIN 1 */") != -1:
new_file_content = new_file_content.replace("/* USER CODE BEGIN 1 */",
"/* USER CODE BEGIN 1 */\n"
"\textern int edf_main_application(void);\n"
"\treturn edf_main_application();"
)
with open(main_file_path, 'w') as file:
file.write(new_file_content)
            # Show a message box if the file could not be opened.
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", "main.c: No such file in directory.")
logging.error(f"/Core/main.c -> No such file in directory.")
return
# Remove exception_interrupt_handler from file stm32fxxx_it.c.
if self._project_bgr == HAL_project:
it_file = "Core/Src/" + self._device_family_series.lower() + "xx_it.c"
it_file_path = os.path.join(self._project_dir, it_file)
start_marker = ("/**\n"
" * @brief This function handles Non maskable interrupt.\n"
" */\n"
"void NMI_Handler(void)")
end_marker = "/* USER CODE END SysTick_IRQn 1 */\n}"
try:
with open(it_file_path, "r") as input_file:
content = input_file.read()
start_index = content.find(start_marker)
end_index = content.find(end_marker, start_index)
if start_index != -1 and end_index != -1:
output_content = content[:start_index] + content[end_index + len(end_marker):]
with open(it_file_path, "w") as output_file:
output_file.write(output_content)
else:
logging.error(f"{it_file_path}: Error during edit file.")
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", f"{it_file_path}: No such file in directory.")
logging.error(f"{it_file_path}: No such file in directory.")
return
# Create folder main.
try:
os.system(f"mkdir {self._project_dir}/main")
os.system(f"cp {self._edf_path}/components/templates/source/app_main.cpp {self._project_dir}/main/")
# Create folder config.
os.system(f"mkdir {self._project_dir}/buildconfig")
os.system(f"cp {self._edf_path}/components/templates/header/* {self._project_dir}/buildconfig")
os.system(f"cp {self._edf_path}/components/templates/kconfig/* {self._project_dir}/buildconfig")
except Exception as e:
logging.error(f"Error creating folder: {e}")
# Get project state.
try:
with open(self._cproject_file_dir, 'r') as f:
file_content = f.read()
if file_content.find("STM_EDF_VERSION") != -1:
self.Btn_Setup.setDisabled(True)
else:
self.Btn_Setup.setDisabled(False)
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", ".cproject: No such file in directory.")
logging.error(f"{self._cproject_file_dir} -> No such file in directory.")
return
make_message_box(QMessageBox.Information, "Progress", "Setup successful.")
logging.info("Setup successful.")
os.system(f"{self._edf_path}/tools/kconfig/dist/kconfig {self._project_dir}")
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
# Restore button handler.
def onButtonRestoreClicked(self):
logging.info(f"Uninstall STM32 RTOSSDK form {self._project_dir}")
# Trả về cấu hình ban đầu cho .cproject và .project.
if (not copy_file(self._cproject_file_dir + recovery_file_sufix, self._cproject_file_dir, True)) or \
not copy_file(self._project_file_dir + recovery_file_sufix, self._project_file_dir, True) or \
not copy_file(self._ioc_file_dir + recovery_file_sufix, self._ioc_file_dir, True) :
make_message_box(QMessageBox.Critical, "Error", "Can't Restore from project.")
logging.error("Can't uninstall from project.")
        # Remove edf functions from main.c and stm32xxxxx_it.c
if self._project_bgr == HAL_project:
# main.c
main_file_path = os.path.join(self._project_dir, "Core/Src/main.c")
try:
with open(main_file_path, 'r') as f:
file_content = f.read()
pattern = re.compile(r'\/\*\s*USER CODE BEGIN 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END 0\s*\*\/', re.DOTALL)
main_c_replace = '/* USER CODE BEGIN 0 */\n\n/* USER CODE END 0 */'
new_file_content = re.sub(pattern, main_c_replace, file_content)
pattern = re.compile(r'\/\*\s*USER CODE BEGIN 1\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END 1\s*\*\/', re.DOTALL)
main_c_replace = '/* USER CODE BEGIN 1 */\n\n /* USER CODE END 1 */'
new_file_content = re.sub(pattern, main_c_replace, new_file_content)
with open(main_file_path, 'w') as file:
file.write(new_file_content)
            # Show a message box if the file could not be opened.
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", "main.c: No such file in directory.")
logging.error("main.c: No such file in directory.")
return
# Get project state.
try:
with open(self._cproject_file_dir, 'r') as f:
file_content = f.read()
if file_content.find("STM_EDF_VERSION") != -1:
self.Btn_Setup.setDisabled(True)
else:
self.Btn_Setup.setDisabled(False)
except FileNotFoundError:
make_message_box(QMessageBox.Critical, "Error", ".cproject: No such file in directory.")
make_message_box(QMessageBox.Information, "Progress", "Restore successful.")
logging.info("Uninstall successful.")
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
app = QApplication(sys.argv)
window = QMainWindow()
inst_gui = installer_app(window, app)
# extern int main_application(void);
# return main_application();
# <linkedResources>
# <link>
# <name>rtossdk</name>
# <type>2</type>
# <location>/home/anh/Projects/CODE/STM32/RTOSSDK/rtossdk</location>
# </link>
# </linkedResources>
# if self.projectbgr == HAL_project:
# it_file = "Core/Src/" + self.device_series.lower() + "xx_it.c";
# it_file_path = os.path.join(self.projectdir, it_file)
# try:
# with open(it_file_path, 'r') as f:
# file_content = f.read()
# # Include libary and declare variable
# if file_content.find("/* USER CODE BEGIN Includes */") != -1:
# new_file_content = file_content.replace("/* USER CODE BEGIN Includes */",
# "/* USER CODE BEGIN Includes */\n"
# "#include \"freertos_port/app_port/freertos_port.h\""
# )
# if file_content.find("/* USER CODE BEGIN EV */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN EV */",
# "/* USER CODE BEGIN EV */\n"
# "extern void exception_interrupt_handler(const char *tag, char *message);\n"
# "static const char *Excep_TAG = \"EXCEPTION\";"
# )
# # Fault notify.
# if file_content.find("/* USER CODE BEGIN HardFault_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN HardFault_IRQn 0 */",
# "/* USER CODE BEGIN HardFault_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Hard fault exception was unhandled(call HardFault_Handler)...\");"
# )
# if file_content.find("/* USER CODE BEGIN MemoryManagement_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN MemoryManagement_IRQn 0 */",
# "/* USER CODE BEGIN MemoryManagement_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Memory management interrupt was unhandled(call MemManage_Handler)...\");"
# )
# if file_content.find("/* USER CODE BEGIN BusFault_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN BusFault_IRQn 0 */",
# "/* USER CODE BEGIN BusFault_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Bus fault exception was unhandled(call BusFault_Handler)...\");"
# )
# if file_content.find("/* USER CODE BEGIN UsageFault_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN UsageFault_IRQn 0 */",
# "/* USER CODE BEGIN UsageFault_IRQn 0 */\n"
# "\texception_interrupt_handler(Excep_TAG, (char *)\"Usage fault exception was unhandled(call UsageFault_Handler)...\");"
# )
# # Port freertos handler.
# if file_content.find("/* USER CODE BEGIN SVCall_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN SVCall_IRQn 0 */",
# "/* USER CODE BEGIN SVCall_IRQn 0 */\n"
# "\tfreertos_svc_handler();"
# )
# if file_content.find("/* USER CODE BEGIN PendSV_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN PendSV_IRQn 0 */",
# "/* USER CODE BEGIN PendSV_IRQn 0 */\n"
# "\tfreertos_pendsv_handler();"
# )
# if file_content.find("/* USER CODE BEGIN SysTick_IRQn 0 */") != -1:
# new_file_content = new_file_content.replace("/* USER CODE BEGIN SysTick_IRQn 0 */",
# "/* USER CODE BEGIN SysTick_IRQn 0 */\n"
# "\textern void systick_app_systick_process(void);\n"
# "\tsystick_app_systick_process();\n"
# "\tfreertos_tick_handler();"
# )
# with open(it_file_path, 'w') as file:
# file.write(new_file_content)
# # Hien message box neu khong mo duoc file.
# except FileNotFoundError:
# make_message_box(QMessageBox.Critical, "Error", f"{it_file}: No such file in directory.")
# return
# # stm32xxxxx_it.c
# it_file = "Core/Src/" + self.device_series.lower() + "xx_it.c";
# it_file_path = os.path.join(self.projectdir, it_file)
# try:
# with open(it_file_path, 'r') as f:
# file_content = f.read()
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN EV\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END EV\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN EV */\n\n /* USER CODE END EV */'
# new_file_content = re.sub(pattern, main_c_replace, file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN HardFault_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END HardFault_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN HardFault_IRQn 0 */\n\n /* USER CODE END HardFault_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN MemoryManagement_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END MemoryManagement_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN MemoryManagement_IRQn 0 */\n\n /* USER CODE END MemoryManagement_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN BusFault_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END BusFault_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN BusFault_IRQn 0 */\n\n /* USER CODE END BusFault_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN UsageFault_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END UsageFault_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN UsageFault_IRQn 0 */\n\n /* USER CODE END UsageFault_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# pattern = re.compile(r'\/\*\s*USER CODE BEGIN SysTick_IRQn 0\s*\*\/\n.*?\n\s*\/\*\s*USER CODE END SysTick_IRQn 0\s*\*\/', re.DOTALL)
# main_c_replace = '/* USER CODE BEGIN SysTick_IRQn 0 */\n\n /* USER CODE END SysTick_IRQn 0 */'
# new_file_content = re.sub(pattern, main_c_replace, new_file_content)
# with open(it_file_path, 'w') as file:
# file.write(new_file_content)
# # Hien message box neu khong mo duoc file.
# except FileNotFoundError:
# make_message_box(QMessageBox.Critical, "Error", "main.c: No such file in directory.")
# return
| maivananh111/stm-edf | tools/setup/setup.py | setup.py | py | 38,714 | python | en | code | 0 | github-code | 6 | 34214181391 |
# Each reported count can at most double the previous one; any excess must
# have been brought in from outside, so the answer is the summed excess.
t = int(input())
for _ in range(t):
    lst = input().split(" ")
    total = int(lst[0])  # starting count
    diff = 0             # total amount that must have been imported
    for i in range(1, len(lst) - 1):  # skip the final token on the line
        add = int(lst[i])
        diff += max(add - total * 2, 0)
        total = add
    print(diff)
| david-vinje/kattis-problems | Solutions/Zanzibar.py | Zanzibar.py | py | 213 | python | en | code | 0 | github-code | 6 | 28076023790 |
from .gaussian_attention import gaussian_mask, gaussian_attention
from keras.layers import Layer
class VisualAttentionLayer(Layer):
def __init__(self, output_dim, transpose=False, **kwargs):
if len(output_dim) != 2:
raise ValueError("`output_dim` has to be a 2D tensor [Height, Width].")
self._output_dim = output_dim
super(VisualAttentionLayer, self).__init__(**kwargs)
def build(self, input_shape):
super(VisualAttentionLayer, self).build(input_shape)
def call(self, x):
if len(x) != 2:
raise ValueError("Input of the layer has to consist of 2 different inputs: the images and the parameters.")
img_tensor, transform_params = x
return gaussian_attention(img_tensor, transform_params, self._output_dim)
def compute_output_shape(self, input_shape):
if len(input_shape) == 2 and len(input_shape[0]) == 4:
return (None, *self._output_dim, input_shape[0][-1])
else:
raise ValueError("The `input_shape` is not correct.")
|
zimmerrol/tf_keras_attention
|
src/gaussian_attention_layer.py
|
gaussian_attention_layer.py
|
py
| 1,066 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3536318559
|
from django.db import models
class Task(models.Model):
username = models.CharField(verbose_name='Имя сотрудника', max_length=30)
task_name = models.CharField(verbose_name='Текст задачи', max_length=100)
per_day = models.PositiveIntegerField(
default=1,
verbose_name='Количество напоминаний за 1 день'
)
def time_dates(self):
quantity = 24 / self.per_day
time_list = [0, ] # [0, 6, 12, 18]
for num in range(self.per_day - 1):
new_time = time_list[num] + quantity
time_list.append(new_time)
return time_list
|
DalaevBC/ping_bot
|
inside/models.py
|
models.py
|
py
| 658 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40199173264
|
from django.test import TestCase
from django.urls import reverse
from . import utils
class TestView(TestCase):
"""
Test that access to views that accept get do not raise exception.
"""
def setUp(self) -> None:
self.views = [
{"name": 'index', 'requires_authentication': False},
{"name": 'about', 'requires_authentication': False},
]
self.configuration = utils.createConfiguration()
return super().setUp()
def test_access_to_views(self):
for view in self.views:
view_name = view['name']
response = self.client.get(reverse(f'core:{view_name}'))
if view['requires_authentication']:
self.assertEqual(response.status_code, 302, f"Access to core:{view_name} raised unexpected status code")
else:
self.assertEqual(response.status_code, 200, f"Access to core:{view_name} raised unexpected status code")
|
Koffi-Cobbin/ACES-WEB
|
core/tests/test_views.py
|
test_views.py
|
py
| 961 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40367677442
|
import pygame
import random
from Farm import Farm
from Lab import Lab
from Armor import Armor
from PowerPlant import PowerPlant
from Battery import Battery
from Engine import Engine
from Command_module import Comand_Module
from Warehouse import Warehouse
from Laser import Laser
from Biome import Biome
from Asteroid import Asteroid
from Container import Container
class Ship():
def __init__(self, screen):
self.x = 150
self.y = 75
self.distance = 0
self.aim_distance = 1000000
self.velocity = 10
self.under_control = True
self.map = [['n' for _ in range(30)] for _ in range(14)]
self.resourses = {'Fe': 100, 'Cu': 50, 'O2': 50, 'CO2': 50, 'Al': 50, 'Si': 50, 'U': 50, 'H2O': 50,
'food': 50, 'energy': 0, 'science': 0}
self.every_single_unit = {'energy': [], 'commands': [], 'food': [], 'storages': [], 'engines': [],
'science': [], 'defense': [], 'cabins': [],
'armor': []}
self.storages = {'energy': [], 'science': [], 'storages': []}
self.group = pygame.sprite.Group()
self.cannons = []
self.comand_modules = []
self.humans = 10
self.cell_size = 30
self.screen = screen
eng = Engine(self, 14, 7)
eng1 = Engine(self, 14, 9)
plant1 = PowerPlant(self, 18, 7)
plant2 = PowerPlant(self, 18, 9)
self.comand_module = Comand_Module(self, 16, 11)
bat1 = Battery(self, 20, 7)
bat2 = Battery(self, 20, 9)
biome1 = Biome(self, 22, 7)
biome2 = Biome(self, 22, 9)
lab1 = Lab(self, 17, 6)
farm = Farm(self, 24, 7)
ware = Warehouse(self, 20, 6)
ware.charges = {'Fe': 10000, 'Cu': 10000, 'O2': 10000, 'CO2': 10000, 'Al': 10000, 'Si': 10000, 'U': 10000,
'H2O': 10000, 'food': 10000}
arm = Armor(self, 23, 6)
arm = Armor(self, 23, 7)
arm = Armor(self, 23, 8)
laser1 = Laser(self, 3, 1)
laser2 = Laser(self, 8, 12)
for i in self.every_single_unit.keys():
for a in self.every_single_unit[i]:
self.group.add(a)
for i in self.storages.keys():
for unit in self.storages[i]:
unit.input()
self.new_group = pygame.sprite.Group()
self.new_group.add(self.comand_module)
self.storages_types = [Battery, Lab]
def destroy(self, unit):
self.group.remove(unit)
self.every_single_unit[unit.cat].remove(unit)
if type(unit) in self.storages_types:
self.storages[unit.cat].remove(unit)
unit.working = False
def dfs(self, sprite, visited):
visited.append(sprite)
for i in pygame.sprite.spritecollide(sprite, self.group, False):
if i not in visited:
self.new_group.add(i)
self.dfs(i, visited)
def blt(self):
self.surf = pygame.Surface((self.cell_size * len(self.map[0]), self.cell_size * len(self.map)), pygame.SRCALPHA)
for i in self.every_single_unit.keys():
for unit in self.every_single_unit[i]:
unit.new_image()
self.group.draw(self.screen)
def all_systems_check(self):
for i in self.group.sprites():
if i.health <= 0:
self.destroy(i)
self.dfs(self.comand_module, [])
for unit in self.group:
if unit not in self.new_group.sprites():
self.destroy(unit)
self.new_group = pygame.sprite.Group()
self.new_group.add(self.comand_module)
self.resourses = {'Fe': 0, 'Cu': 0, 'O2': 0, 'CO2': 0, 'Al': 0, 'Si': 0, 'U': 0, 'H2O': 0,
'food': 0, 'energy': 0, 'science': 0}
self.humans = 0
for i in self.every_single_unit['cabins']:
i.output()
for i in self.storages.keys():
for unit in self.storages[i]:
unit.output()
self.under_control = False
for i in self.comand_modules:
if i.working:
self.under_control = True
for cat in self.every_single_unit.keys():
for unit in self.every_single_unit[cat]:
unit.do()
for i in self.storages.keys():
for unit in self.storages[i]:
unit.input()
for i in self.every_single_unit['cabins']:
i.input()
def change(self, x, y):
for unit in self.group.sprites():
if unit.rect.collidepoint(x, y):
if unit.working:
unit.working = False
else:
unit.working = True
def move(self, nx, ox, ny, oy):
self.x = nx
self.y = ny
for cat in self.every_single_unit.keys():
for unit in self.every_single_unit[cat]:
unit.rect.move_ip(nx - ox, ny - oy)
def shoot(self, event_group):
for cannon in self.cannons:
if pygame.sprite.spritecollideany(cannon, event_group, pygame.sprite.collide_circle_ratio(3.5)) != None:
for i in [pygame.sprite.spritecollideany(cannon, event_group, pygame.sprite.collide_circle_ratio(3.5))]:
if type(i) == Asteroid:
cannon.shoot(i)
elif type(i) == Container:
cannon.grab(i)
for i in self.resourses.keys():
self.resourses[i] += random.randint(100, 100)
|
Martian2024/PyGame_Project
|
Ship.py
|
Ship.py
|
py
| 5,582 |
python
|
en
|
code
| 3 |
github-code
|
6
|
42602830723
|
from matplotlib import pyplot as plt
font = {'family':'sans-serif', 'sans-serif':'Arial'}
plt.rc('font', **font)
plt.title('', fontsize='x-large', pad=None)
plt.xlabel('', fontsize='x-large')
plt.ylabel('', fontsize='x-large')
# plt.xscale('log')
plt.tick_params(axis="both",direction="in", labelsize='x-large')
plt.subplots_adjust(left=0.30, bottom=0.30, right=0.70, top=0.70, wspace=0.20, hspace=0.20)
plt.legend(fontsize='large').set_draggable(True)
plt.grid(alpha=0.5)
|
hitergelei/tools
|
plt-format.py
|
plt-format.py
|
py
| 475 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3528547150
|
import os
import pytest
from dvclive.data.scalar import Scalar
from dvclive.keras import DvcLiveCallback
from tests.test_main import read_logs
# pylint: disable=unused-argument, no-name-in-module, redefined-outer-name
@pytest.fixture
def xor_model():
import numpy as np
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Activation, Dense
def make():
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
model = Sequential()
model.add(Dense(8, input_dim=2))
model.add(Activation("relu"))
model.add(Dense(1))
model.add(Activation("sigmoid"))
model.compile(
loss="binary_crossentropy", optimizer="sgd", metrics=["accuracy"]
)
return model, x, y
yield make
def test_keras_callback(tmp_dir, xor_model, capture_wrap):
model, x, y = xor_model()
model.fit(
x,
y,
epochs=1,
batch_size=1,
validation_split=0.2,
callbacks=[DvcLiveCallback()],
)
assert os.path.exists("dvclive")
logs, _ = read_logs(tmp_dir / "dvclive" / Scalar.subfolder)
assert os.path.join("train", "accuracy") in logs
assert os.path.join("eval", "accuracy") in logs
@pytest.mark.parametrize("save_weights_only", (True, False))
def test_keras_model_file(
tmp_dir, xor_model, mocker, save_weights_only, capture_wrap
):
model, x, y = xor_model()
save = mocker.spy(model, "save")
save_weights = mocker.spy(model, "save_weights")
model.fit(
x,
y,
epochs=1,
batch_size=1,
callbacks=[
DvcLiveCallback(
model_file="model.h5", save_weights_only=save_weights_only
)
],
)
assert save.call_count != save_weights_only
assert save_weights.call_count == save_weights_only
@pytest.mark.parametrize("save_weights_only", (True, False))
def test_keras_load_model_on_resume(
tmp_dir, xor_model, mocker, save_weights_only, capture_wrap
):
import dvclive.keras
model, x, y = xor_model()
if save_weights_only:
model.save_weights("model.h5")
else:
model.save("model.h5")
load_weights = mocker.spy(model, "load_weights")
load_model = mocker.spy(dvclive.keras, "load_model")
model.fit(
x,
y,
epochs=1,
batch_size=1,
callbacks=[
DvcLiveCallback(
model_file="model.h5",
save_weights_only=save_weights_only,
resume=True,
)
],
)
assert load_model.call_count != save_weights_only
assert load_weights.call_count == save_weights_only
def test_keras_no_resume_skip_load(tmp_dir, xor_model, mocker, capture_wrap):
model, x, y = xor_model()
model.save_weights("model.h5")
load_weights = mocker.spy(model, "load_weights")
model.fit(
x,
y,
epochs=1,
batch_size=1,
callbacks=[
DvcLiveCallback(
model_file="model.h5",
save_weights_only=True,
resume=False,
)
],
)
assert load_weights.call_count == 0
def test_keras_no_existing_model_file_skip_load(
tmp_dir, xor_model, mocker, capture_wrap
):
model, x, y = xor_model()
load_weights = mocker.spy(model, "load_weights")
model.fit(
x,
y,
epochs=1,
batch_size=1,
callbacks=[
DvcLiveCallback(
model_file="model.h5",
save_weights_only=True,
resume=True,
)
],
)
assert load_weights.call_count == 0
def test_keras_None_model_file_skip_load(
tmp_dir, xor_model, mocker, capture_wrap
):
model, x, y = xor_model()
model.save_weights("model.h5")
load_weights = mocker.spy(model, "load_weights")
model.fit(
x,
y,
epochs=1,
batch_size=1,
callbacks=[
DvcLiveCallback(
save_weights_only=True,
resume=True,
)
],
)
assert load_weights.call_count == 0
|
gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs
|
myenve/Lib/site-packages/tests/test_keras.py
|
test_keras.py
|
py
| 4,219 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71477711228
|
import sys
sys.stdin = open('input.txt')
def cook(i, n, group1, group2):
global selection, result
    # return once both groups are complete
if len(group1) == n//2 and len(group2) == n//2:
ans1 = 0
ans2 = 0
for i in range(n//2):
for j in range(i, n//2):
ans1 += data[group1[i]][group1[j]] + data[group1[j]][group1[i]]
ans2 += data[group2[i]][group2[j]]+ data[group2[j]][group2[i]]
result = min(result, abs(ans1-ans2))
return
    # build the combinations
    # split into group1 and group2 from the start by branching both ways
if len(group1) < n//2 and len(group2) < n//2:
cook(i+1, n, group1+[i], group2)
cook(i+1, n, group1, group2+[i])
elif len(group1) < n//2:
cook(i + 1, n, group1 + [i], group2)
else:
cook(i + 1, n, group1, group2 + [i])
T = int(input())
for tc in range(1, T+1):
N = int(input())
data = [list(map(int, input().split())) for _ in range(N)]
selection = [False]*N
result = 20000 * N**2
cook(0, N, [], [])
print(f'#{tc}', result)
|
YOONJAHYUN/Python
|
SWEA/4012_cook/sol2.py
|
sol2.py
|
py
| 1,095 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26248063876
|
from datetime import datetime
import six
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils, importutils
from delfin import db
from delfin.common.constants import TelemetryCollection, TelemetryJobStatus
from delfin.exception import TaskNotFound
from delfin.i18n import _
from delfin.task_manager import rpcapi as task_rpcapi
from delfin.task_manager.scheduler import schedule_manager
from delfin.task_manager.tasks.telemetry import PerformanceCollectionTask
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class JobHandler(object):
def __init__(self, ctx, task_id, storage_id, args, interval):
# create an object of periodic task scheduler
self.ctx = ctx
self.task_id = task_id
self.storage_id = storage_id
self.args = args
self.interval = interval
self.task_rpcapi = task_rpcapi.TaskAPI()
self.scheduler = schedule_manager.SchedulerManager().get_scheduler()
self.stopped = False
self.job_ids = set()
@staticmethod
def get_instance(ctx, task_id):
task = db.task_get(ctx, task_id)
return JobHandler(ctx, task_id, task['storage_id'],
task['args'], task['interval'])
def perform_history_collection(self, start_time, end_time, last_run_time):
# Trigger one historic collection to make sure we do not
# miss any Data points due to reschedule
LOG.debug('Triggering one historic collection for task %s',
self.task_id)
try:
telemetry = PerformanceCollectionTask()
ret = telemetry.collect(self.ctx, self.storage_id, self.args,
start_time, end_time)
LOG.debug('Historic collection performed for task %s with '
'result %s' % (self.task_id, ret))
db.task_update(self.ctx, self.task_id,
{'last_run_time': last_run_time})
except Exception as e:
msg = _("Failed to collect performance metrics during history "
"collection for storage id:{0}, reason:{1}"
.format(self.storage_id, six.text_type(e)))
LOG.error(msg)
def schedule_job(self, task_id):
if self.stopped:
# If Job is stopped return immediately
return
LOG.info("JobHandler received A job %s to schedule" % task_id)
job = db.task_get(self.ctx, task_id)
# Check delete status of the task
deleted = job['deleted']
if deleted:
return
collection_class = importutils.import_class(
job['method'])
instance = collection_class.get_instance(self.ctx, self.task_id)
current_time = int(datetime.now().timestamp())
last_run_time = current_time
next_collection_time = last_run_time + job['interval']
job_id = uuidutils.generate_uuid()
next_collection_time = datetime \
.fromtimestamp(next_collection_time) \
.strftime('%Y-%m-%d %H:%M:%S')
existing_job_id = job['job_id']
scheduler_job = self.scheduler.get_job(existing_job_id)
if not (existing_job_id and scheduler_job):
LOG.info('JobHandler scheduling a new job')
self.scheduler.add_job(
instance, 'interval', seconds=job['interval'],
next_run_time=next_collection_time, id=job_id,
misfire_grace_time=int(job['interval'] / 2))
update_task_dict = {'job_id': job_id}
db.task_update(self.ctx, self.task_id, update_task_dict)
self.job_ids.add(job_id)
            LOG.info('Periodic collection tasks scheduled for job id: '
                     '%s ' % self.task_id)
# Check if historic collection is needed for this task.
# If the last run time is already set, adjust start_time based on
            # last run time or history_on_reschedule, whichever is smaller.
            # If job id is created but last run time is not yet set, then
# adjust start_time based on interval or history_on_reschedule
# whichever is smaller
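            # Worked example (illustrative numbers): with
            # performance_history_on_reschedule = 1800 s and a last run 600 s
            # ago, 600 < 1800, so start_time = last_run_time * 1000; if the
            # last run was 3600 s ago, start_time = end_time - 1800 * 1000.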
end_time = current_time * 1000
# Maximum supported history duration on restart
history_on_reschedule = CONF.telemetry. \
performance_history_on_reschedule
if job['last_run_time']:
start_time = job['last_run_time'] * 1000 \
if current_time - job['last_run_time'] < \
history_on_reschedule \
else (end_time - history_on_reschedule * 1000)
self.perform_history_collection(start_time, end_time,
last_run_time)
elif existing_job_id:
interval_in_sec = job['interval']
start_time = (end_time - interval_in_sec * 1000) \
if interval_in_sec < history_on_reschedule \
else (end_time - history_on_reschedule * 1000)
self.perform_history_collection(start_time, end_time,
last_run_time)
else:
LOG.info('Job already exists with this scheduler')
def stop(self):
self.stopped = True
for job_id in self.job_ids.copy():
self.remove_scheduled_job(job_id)
LOG.info("Stopping telemetry jobs")
def remove_scheduled_job(self, job_id):
if job_id in self.job_ids:
self.job_ids.remove(job_id)
if job_id and self.scheduler.get_job(job_id):
self.scheduler.remove_job(job_id)
def remove_job(self, task_id):
try:
LOG.info("Received job %s to remove", task_id)
job = db.task_get(self.ctx, task_id)
job_id = job['job_id']
self.remove_scheduled_job(job_id)
except Exception as e:
LOG.error("Failed to remove periodic scheduling job , reason: %s.",
six.text_type(e))
class FailedJobHandler(object):
def __init__(self, ctx):
# create an object of periodic failed task scheduler
self.scheduler = schedule_manager.SchedulerManager().get_scheduler()
self.ctx = ctx
self.stopped = False
self.job_ids = set()
@staticmethod
def get_instance(ctx, failed_task_id):
return FailedJobHandler(ctx)
def schedule_failed_job(self, failed_task_id):
if self.stopped:
return
try:
job = db.failed_task_get(self.ctx, failed_task_id)
retry_count = job['retry_count']
result = job['result']
job_id = job['job_id']
if retry_count >= \
TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT or \
result == TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS:
LOG.info("Exiting Failure task processing for task [%d] "
"with result [%s] and retry count [%d] "
% (job['id'], result, retry_count))
self._teardown_task(self.ctx, job['id'], job_id)
return
# If job already scheduled, skip
if job_id and self.scheduler.get_job(job_id):
return
try:
db.task_get(self.ctx, job['task_id'])
except TaskNotFound as e:
LOG.info("Removing failed telemetry job as parent job "
"do not exist: %s", six.text_type(e))
# tear down if original task is not available
self._teardown_task(self.ctx, job['id'],
job_id)
return
if not (job_id and self.scheduler.get_job(job_id)):
job_id = uuidutils.generate_uuid()
db.failed_task_update(self.ctx, job['id'],
{'job_id': job_id})
collection_class = importutils.import_class(
job['method'])
instance = \
collection_class.get_instance(self.ctx, job['id'])
self.scheduler.add_job(
instance, 'interval',
seconds=job['interval'],
next_run_time=datetime.now(), id=job_id,
misfire_grace_time=int(job['interval'] / 2))
self.job_ids.add(job_id)
except Exception as e:
LOG.error("Failed to schedule retry tasks for performance "
"collection, reason: %s", six.text_type(e))
else:
LOG.info("Schedule collection completed")
def _teardown_task(self, ctx, failed_task_id, job_id):
db.failed_task_delete(ctx, failed_task_id)
self.remove_scheduled_job(job_id)
def remove_scheduled_job(self, job_id):
if job_id in self.job_ids:
self.job_ids.remove(job_id)
if job_id and self.scheduler.get_job(job_id):
self.scheduler.remove_job(job_id)
def stop(self):
self.stopped = True
for job_id in self.job_ids.copy():
self.remove_scheduled_job(job_id)
def remove_failed_job(self, failed_task_id):
try:
LOG.info("Received failed job %s to remove", failed_task_id)
job = db.failed_task_get(self.ctx, failed_task_id)
job_id = job['job_id']
self.remove_scheduled_job(job_id)
db.failed_task_delete(self.ctx, job['id'])
LOG.info("Removed failed_task entry %s ", job['id'])
except Exception as e:
LOG.error("Failed to remove periodic scheduling job , reason: %s.",
six.text_type(e))
@classmethod
def job_interval(cls):
return TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL
|
sodafoundation/delfin
|
delfin/task_manager/scheduler/schedulers/telemetry/job_handler.py
|
job_handler.py
|
py
| 9,923 |
python
|
en
|
code
| 201 |
github-code
|
6
|
23188032907
|
# link : https://school.programmers.co.kr/learn/courses/30/lessons/172928
# title : 공원 산책 (Park Stroll)
def solution(park, routes):
for r_idx, i in enumerate(park):
for c_idx, j in enumerate(i):
if(j == "S"):
dog = Dog(park, c_idx, r_idx)
for inst in routes :
arg = inst.split(" ")
if arg[0] == "E":
dog.Right(int(arg[1]))
elif arg[0] == "W":
dog.Left(int(arg[1]))
elif arg[0] == "N":
dog.Up(int(arg[1]))
else :
dog.Down(int(arg[1]))
print(dog.getPos())
return dog.getPos()
class Dog :
def __init__(self, park, xpos, ypos) :
self.park = park
self.xpos = xpos
self.ypos = ypos
def Right(self, step) :
for i in range(self.xpos+1, self.xpos+step+1) :
if (i >= len(self.park[0])) or (self.park[self.ypos][i] == "X") :
return
self.xpos += step
def Left(self, step) :
for i in range(self.xpos-1, self.xpos-step-1, -1) :
if (i < 0) or (self.park[self.ypos][i] == "X") :
return
self.xpos -= step
def Up(self, step) :
for i in range(self.ypos-1, self.ypos-step-1, -1) :
if (i < 0) or (self.park[i][self.xpos] == "X") :
return
self.ypos -= step
def Down(self, step) :
# x.pos : 2, y.pos:0 0+2+1
for i in range(self.ypos+1, self.ypos+step+1) :
if (i >= len(self.park)) or (self.park[i][self.xpos] == "X") :
return
self.ypos += step
def getPos(self) :
return [self.ypos, self.xpos]
park = ["SOO","OXX","OOO"]
routes = ["E 2","S 2","W 1"]
print(solution(park, routes))
|
yuseung0429/CodingTest
|
Programmers/Python/solved/Problem_172928.py
|
Problem_172928.py
|
py
| 1,740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44793171293
|
class Solution:
    def levelOrder(self, root):
        # BFS level-order traversal using a list as a FIFO queue.
        self.ans = []
        que = []
        if root is None:
            return self.ans
        que.append(root)
        while len(que) > 0:
            data = que.pop(0)
            if data.left:
                que.append(data.left)
            if data.right:
                que.append(data.right)
            print(data.data, end=' ')
            self.ans.append(data.data)
class Node:
def __init__(self, val):
self.right = None
self.data = val
self.left = None
node1 = Node(1)
node2 = Node(2)
node3 = Node(3)
node4 = Node(4)
node5 = Node(5)
node6 = Node(6)
node7 = Node(7)
node8 = Node(8)
node9 = Node(9)
node10 = Node(10)
node11 = Node(11)
node12 = Node(12)
node13 = Node(13)
node14 = Node(14)
node15 = Node(15)
node1.right = node2
node2.right = node3
node1.left = node4
node4.left = node5
node4.right = node6
node5.left = node7
node5.right = node8
node8.left = node9
node9.left = node10
node9.right = node11
node6.right = node12
node12.left = node13
node12.right = node14
node14.right= node15
sol = Solution()
sol.levelOrder(node1)
print(sol.ans)
|
Shwaubh/LoveBabbarSolution
|
Binary Trees/Solution174LevelOrderTravesal.py
|
Solution174LevelOrderTravesal.py
|
py
| 1,131 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18769333881
|
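# Elimination FizzBuzz (AOJ 0221): players 1..m take turns saying the sequence;
# a player whose statement b[j] differs from the correct value t[j] is removed
# on the spot, the game stops once only one player remains, and the surviving
# player numbers are printed in ascending order.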
while(True):
m,n = map(int,input().split())
if not m: break
a = list(range(1,m+1))
b = [input() for _ in range(n)]
t = [str(i+1) for i in range(n)]
t[2::3] = ["Fizz"]*len(t[2::3])
t[4::5] = ["Buzz"]*len(t[4::5])
t[14::15] = ["FizzBuzz"]*len(t[14::15])
i=0
for j in range(n):
if len(a) <2: break
if b[j] != t[j]: del a[i]; i = 0 if i >= len(a) else i; continue
i += 1
i = i%len(a)
print(" ".join(str(e) for e in a))
|
ehki/AOJ_challenge
|
python/0221.py
|
0221.py
|
py
| 494 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37539866054
|
import corexyLib
import motorLib
from time import sleep
import RPi.GPIO as GPIO
SQUARE_SIDE = 57.3
resolution = 'Half'
corexy = corexyLib.CoreXY(20, resolution, motorLib.Motor(20, 21, 1, (14, 15, 18), 400, resolution), motorLib.Motor(19, 26, 1, (14, 15, 18), 400, resolution), 0, 0)
corexy.motorA.initial_set_up()
corexy.motorB.initial_set_up()
GPIO.setup(5, GPIO.OUT)
# Assume origin at the up left corner of square(1,1)
def cal_xy_coor(row, col):
x_coor = ((row-1)+ (1/2)) * SQUARE_SIDE
y_coor = ((col-1) + (1/2)) *SQUARE_SIDE
return x_coor, y_coor
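# Example (illustrative): with SQUARE_SIDE = 57.3, cal_xy_coor(1, 1) gives the
# centre of square (1, 1), i.e. (28.65, 28.65), and cal_xy_coor(2, 3) gives
# (85.95, 143.25).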
GPIO.output(5, GPIO.HIGH)
while(True):
corexy.move_up(2500)
corexy.move_down(2500)
corexy.move_left(2500)
corexy.move_right(2500)
corexy.move_down(2500)
corexy.move_up(2500)
corexy.move_right(2500)
corexy.move_left(2500)
GPIO.output(5, GPIO.LOW)
corexy.move_up(2500)
corexy.move_down(2500)
# corexy.move_left(5424)
# corexy.move_down(6630)
# corexy.motorA.motor_steps(5000)
# corexy.motorA.set_counter_clockwise()
# corexy.motorA.motor_steps(5000)
# while True:
# row = int(input("Enter row: "))
# col = int(input("Enter col: "))
# new_x_coor, new_y_coor = cal_xy_coor(row, col)
# corexy.move_to(new_x_coor, new_y_coor)
|
Pearston/ReChess
|
motorEmbeddedCode/test_corexy.py
|
test_corexy.py
|
py
| 1,248 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5648159483
|
import sys
from typing import List, Optional, Tuple, cast
import unittest
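# Tabulated dynamic programming: table[i] holds one list of words that builds
# target_string[:i] (None if that prefix is unreachable); each reachable prefix
# is extended by every candidate string, and table[len(target_string)] is the answer.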
def how_construct(target_string: str, strings: List[str]) -> Optional[List[str]]:
n = len(target_string) + 1
table: List[Optional[List[str]]] = [
[] if i == 0 else None for i in range(n)]
for i in range(n):
if table[i] is not None:
for string in strings:
j = i + len(string)
if j < n and target_string[i: j] == string:
table[j] = [*cast(List[str], table[i]), string]
return table[len(target_string)]
class SolutionTest(unittest.TestCase):
def test_solution(self):
sys.setrecursionlimit(10000)
fixtures = [
(
("abcdef", ["ab", "abc", "cd", "def", "abcd"]),
["abc", "def"],
),
(
("skateboard", ["bo", "rd", "ate", "t", "ska", "sk", "boar"]),
None,
),
(
("", ["cat", "dog", "mouse"]),
[],
),
]
for inputs, output in fixtures:
solution = how_construct(*inputs)
if solution:
self.assertEqual(sorted(output), sorted(solution))
else:
self.assertEqual(output, solution)
|
bradtreloar/freeCodeCamp_DP_problems
|
problems/tabulated/how_construct.py
|
how_construct.py
|
py
| 1,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21002262598
|
import os
import pandas as pd
from natsort import natsorted
from openpyxl import load_workbook
dirName = './pozyxAPI_dane_pomiarowe'
def parse_learn_data():
data = pd.DataFrame()
for filename in natsorted(os.listdir(dirName)):
if filename.endswith(".xlsx"):
df = pd.read_excel(f"{dirName}/{filename}")
df = df[['0/timestamp', 't', 'no', 'measurement x', 'measurement y', 'reference x', 'reference y']]
data = data.append(df, ignore_index=True)
df = pd.read_excel("./pozyxAPI_only_localization_dane_testowe_i_dystrybuanta.xlsx")
df = df[['0/timestamp', 't', 'no', 'measurement x', 'measurement y', 'reference x', 'reference y']]
data = data.append(df, ignore_index=True)
data.to_csv("./dataset.csv")
def add_results_to_main_excel(predict_test):
df_to_export = pd.DataFrame(predict_test)
with pd.ExcelWriter('./tmp.xlsx') as writer:
df_to_export.to_excel(writer, sheet_name='Results', index=False)
export_workbook = load_workbook(filename='./tmp.xlsx')
export_sheet = export_workbook.active
target_workbook = load_workbook(filename='./pozyxAPI_only_localization_dane_testowe_i_dystrybuanta.xlsx')
target_sheet = target_workbook.active
export_values = []
for value in export_sheet.iter_rows(min_row=2, max_col=2, values_only=True):
export_values.append(value)
os.remove('tmp.xlsx')
for it in range(1, export_values.__len__()+1):
target_sheet[f'O{it + 1}'] = export_values[it-1][0]
target_sheet[f'P{it + 1}'] = export_values[it-1][1]
target_workbook.save('./Results.xlsx')
|
precel120/SISE
|
Task 2/excel.py
|
excel.py
|
py
| 1,618 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36837442603
|
"""
Ex 7.
Program description: The objective of this program is to create a function named “print_date”
which prints the date (using the previously created list of days) in the format Month/Day/Year.
"""
days = ["Friday", "Saturday", "Sunday", "Monday", "Tuesday", "Wensday", "Thursday"]
year = 2021
month = 1
day = 23
def next():
    global day, month, year
    if(day<31):
        day+=1
    else:
        day = 0
        month+=1
        if(month == 12):
            month = 1
            year+=1
def find_day(day, month, year):
total = (day-1) + (month-1)*30 + (360 * (year-2021))
return days[total%7]
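# Example with the simplified 30-day-month calendar above:
# find_day(23, 1, 2021) -> total = 22 + 0 + 0 = 22, 22 % 7 = 1 -> "Saturday".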
def print_date():
return find_day(day,month,year) + ", " + str(month) + "/" + str(day) + "/" + str(year)
print(print_date())
|
Deepsphere-AI/AI-lab-Schools
|
Grade 09/Unit-1/Python/Nine_PPT_7.py
|
Nine_PPT_7.py
|
py
| 682 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27569063700
|
class CityPlan:
def __init__(self, city_plan):
self.position = city_plan["position"]
self.criteria = city_plan["criteria"]
self.score1 = city_plan["score1"]
self.score2 = city_plan["score2"]
@property
def criteria(self):
return self._criteria
@property
def position(self):
return self._position
@property
def score1(self):
return self._score1
@property
def score2(self):
return self._score2
@criteria.setter
def criteria(self, criteria: list):
'''
-> list[int]
'''
assert type(criteria) == list or type(criteria) == str
if type(criteria) == list and all(type(i) == int for i in criteria):
assert criteria == sorted(criteria)
else:
criteria_one = [["all houses", 0], ["all houses", 2], "end houses", "7 temps", "5 bis"]
criteria_two = ["two streets all parks", "two streets all pools", ["all pools all parks", 1], ["all pools all parks", 2], "all pools all parks one roundabout"]
if self.position == 1:
assert criteria in criteria_one
elif self.position == 2:
assert criteria in criteria_two
else:
raise Exception("Invalid Position")
self._criteria = criteria
@position.setter
def position(self, position: int):
'''
-> int
'''
assert 1 <= position <= 3 and type(position) == int
self._position = position
@score1.setter
def score1(self, score1: int):
'''
-> int
'''
assert 0 <= score1 and type(score1) == int
self._score1 = score1
@score2.setter
def score2(self, score2: int):
'''
-> int
'''
assert 0 <= score2 and type(score2) == int
self._score2 = score2
|
maxlou188/WelcomeTo
|
city_plan.py
|
city_plan.py
|
py
| 1,977 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39380955921
|
#%% Imports
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from collections import defaultdict
from helpers import pairwiseDistCorr,nn_reg,nn_arch,reconstructionError
from matplotlib import cm
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.random_projection import SparseRandomProjection, GaussianRandomProjection
from itertools import product
from helpers import get_data_from_csv
out = './RP/'
cmap = cm.get_cmap('Spectral')
np.random.seed(42)
# import wine quality data
wineX, wineY = get_data_from_csv("./BASE/wine_trg.csv", n_features=11, sep=',', header=None)
digitX, digitY = get_data_from_csv("./BASE/digit_trg.csv", n_features=256, sep=',', header=None)
wineX = StandardScaler().fit_transform(wineX)
digitX = StandardScaler().fit_transform(digitX)
clusters = [2,5,10,15,20,25,30,35,40]
dims = [2,5,10,15,20,25,30,35,40,45,50,55,60]
dims_wine = [i for i in range(2,12)]
# data for 1
tmp = defaultdict(dict)
for i,dim in product(range(10),dims_wine):
rp = SparseRandomProjection(random_state=i, n_components=dim)
tmp[dim][i] = pairwiseDistCorr(rp.fit_transform(wineX), wineX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'wine scree1.csv')
tmp = defaultdict(dict)
for i,dim in product(range(10),dims):
rp = SparseRandomProjection(random_state=i, n_components=dim)
tmp[dim][i] = pairwiseDistCorr(rp.fit_transform(digitX), digitX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'digit scree1.csv')
tmp = defaultdict(dict)
for i,dim in product(range(10),dims_wine):
rp = SparseRandomProjection(random_state=i, n_components=dim)
rp.fit(wineX)
tmp[dim][i] = reconstructionError(rp, wineX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'wine scree2.csv')
tmp = defaultdict(dict)
for i,dim in product(range(10),dims):
rp = SparseRandomProjection(random_state=i, n_components=dim)
rp.fit(digitX)
tmp[dim][i] = reconstructionError(rp, digitX)
tmp =pd.DataFrame(tmp).T
tmp.to_csv(out+'digit scree2.csv')
# Data for 2
grid ={'rp__n_components':dims_wine,'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
rp = SparseRandomProjection(random_state=5)
mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
pipe = Pipeline([('rp',rp),('NN',mlp)])
gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
gs.fit(wineX,wineY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out+'wine dim red.csv')
grid ={'rp__n_components':dims,'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
rp = SparseRandomProjection(random_state=5)
mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
pipe = Pipeline([('rp',rp),('NN',mlp)])
gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
gs.fit(digitX,digitY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out+'digit dim red.csv')
# data for 3
# Set this from chart 2 and dump, use clustering script to finish up
dim = 6
rp = SparseRandomProjection(n_components=dim,random_state=5)
wineX2 = rp.fit_transform(wineX)
wine2 = pd.DataFrame(np.hstack((wineX2,np.atleast_2d(wineY))))
cols = list(range(wine2.shape[1]))
cols[-1] = 'Class'
wine2.columns = cols
wine2.to_csv(out+'wine_datasets.csv',index=False,header=False)
dim = 60
rp = SparseRandomProjection(n_components=dim,random_state=5)
digitX2 = rp.fit_transform(digitX)
digit2 = pd.DataFrame(np.hstack((digitX2,np.atleast_2d(digitY))))
cols = list(range(digit2.shape[1]))
cols[-1] = 'Class'
digit2.columns = cols
digit2.to_csv(out+'digit_datasets.csv',index=False,header=False)
|
SenRamakri/CS-7641-Assignment-3
|
RP.py
|
RP.py
|
py
| 3,594 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18131109931
|
import sys
N = int(input())
Nums = list(map(int, sys.stdin.readline().split()))
M = int(input())
Mums = list(map(int, sys.stdin.readline().split()))
Nums.sort()
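# Classic binary search on the sorted list; for each query print 1 if it is
# present, otherwise 0.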
def binary_search(answer):
start = 0
end = N - 1
while start <= end:
mid = (start + end) // 2
if Nums[mid] == answer:
return True
elif Nums[mid] < answer:
start = mid + 1
else:
end = mid - 1
return False
for i in range(0, M):
if (binary_search(Mums[i]) == True):
print("1")
else:
print(0)
|
Hyeneung-Kwon/Baekjoon_Python
|
1920.py
|
1920.py
|
py
| 563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45478158273
|
# Helper class for parsing typed fields (numbers, MAC/IP/IPv6 addresses, hex) out of a hex string.
from common.types import *
class ParseHelper:
@staticmethod
def getData(rawdata, format_info):
data_type = format_info[0]
if(data_type==DataType.NUM):
return ParseHelper.getNumber(rawdata, format_info[1], format_info[2])
elif(data_type==DataType.HEX):
return ParseHelper.getHEX(rawdata, format_info[1], format_info[2])
elif(data_type==DataType.MAC):
return ParseHelper.getMACFormat(rawdata, format_info[1], format_info[2])
elif(data_type==DataType.IP):
return ParseHelper.getIPFormat(rawdata, format_info[1], format_info[2])
elif(data_type==DataType.IPv6):
return ParseHelper.getIPv6Format(rawdata, format_info[1], format_info[2])
return None # Type isn't specified.
@staticmethod
def getNumber(rawdata, start, length) -> int:
start_padding = 0
end_padding = 0
if (start) % 4 != 0:
start_padding = start % 4
if (length) % 4 != 0:
end_padding = 4 - length % 4
temp_hex = rawdata[(start+start_padding)//4:(start+length+end_padding)//4]
temp_int = bin(int(temp_hex, 16))
#print("getNumber({},{},{}) ==> {} ==> {}".format(rawdata, (start+start_padding)//4, (start+length+end_padding)//4, temp_hex, temp_int))
temp_int = temp_int[2:2+length]
return int(temp_int, 2)
@staticmethod
def getHEX(rawdata, start, length) -> str:
start_padding = 0
end_padding = 0
if (start) % 4 != 0:
start_padding = start % 4
if (length) % 4 != 0:
end_padding = 4 - length % 4
temp_hex = rawdata[(start+start_padding)//4:(start+length+end_padding)//4]
temp_int = bin(int(temp_hex, 16))
#print("getNumber({},{},{}) ==> {} ==> {}".format(rawdata, (start+start_padding)//4, (start+length+end_padding)//4, temp_hex, temp_int))
temp_int = temp_int[2:2+length]
return hex(int(temp_int, 2))
#return rawdata[start:start+length]
@staticmethod
def getMACFormat(rawdata, start, length) -> str:
temp_lst = []
for i in range(start//4,(start+length)//4,2):
temp_lst.append(rawdata[i: i+2])
mac = ":".join(temp_lst)
return mac.upper()
@staticmethod
def getIPFormat(rawdata, start, length) -> str:
temp_lst = []
for i in range(start//4,(start+length)//4,2):
temp_lst.append(str(int(rawdata[i: i+2],16)))
ip = ".".join(temp_lst)
return ip
@staticmethod
def getIPv6Format(rawdata, start, length) -> str:
temp_lst = []
for i in range(start//4,(start+length)//4,2):
temp_lst.append(rawdata[i: i+2])
mac = ":".join(temp_lst)
for i in range(16,1,-1):
candidate = ("00:"*i)[:-1]
if mac.find(candidate) > -1:
mac = mac.replace(candidate,"",1)
break
mac = mac.replace("00","0")
return mac.upper()
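# Illustrative usage (hypothetical frame dump; offsets and lengths are in bits):
#   frame_hex = "001122334455" + "a1b2c3d4e5f6" + "0800"
#   dst_mac = ParseHelper.getData(frame_hex, (DataType.MAC, 0, 48))     # '00:11:22:33:44:55'
#   src_mac = ParseHelper.getData(frame_hex, (DataType.MAC, 48, 48))    # 'A1:B2:C3:D4:E5:F6'
#   ether_type = ParseHelper.getData(frame_hex, (DataType.HEX, 96, 16)) # '0x800'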
|
Bitbyul/protocol-analyzer
|
model/parseHelper.py
|
parseHelper.py
|
py
| 3,078 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30510489975
|
# coding=utf-8
import hashlib
from falcon.errors import HTTPBadRequest
from ultros_site.base_route import BaseRoute
__author__ = "Gareth Coles"
class ProfileRoute(BaseRoute):
route = "/profile"
def on_get(self, req, resp):
user = req.context["user"]
if not user:
raise HTTPBadRequest()
self.render_template(
req, resp, "users/profile.html",
user=user,
avatar="https://www.gravatar.com/avatar/{}".format(self.gravatar_hash(user.email))
)
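    # Gravatar expects the MD5 hex digest of the trimmed, lower-cased address,
    # so e.g. ' User@Example.COM ' and 'user@example.com' map to the same avatar.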
def gravatar_hash(self, email: str):
email = email.strip()
email = email.lower()
email = email.encode("UTF-8")
return hashlib.md5(email).hexdigest()
|
UltrosBot/Ultros-site
|
ultros_site/routes/users/profile.py
|
profile.py
|
py
| 719 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26471185861
|
import unittest
from solution.batch4.problem47 import consecutive_distinct_primes, \
first_4_distinct_primes
class DistinctPrimesFactors(unittest.TestCase):
def test_HR_problem_k2(self):
k = 2
nums = [20, 100]
expected = [
[14, 20],
[14, 20, 21, 33, 34, 35, 38, 39, 44, 45, 50, 51, 54, 55,
56, 57, 62, 68, 74, 75, 76, 85, 86, 87, 91, 92, 93, 94, 95, 98, 99]
]
for i, n in enumerate(nums):
self.assertEqual(expected[i], consecutive_distinct_primes(n, k))
def test_HR_problem_k3(self):
k = 3
nums = [644, 1000]
expected = [[644], [644, 740, 804, 986]]
for i, n in enumerate(nums):
self.assertEqual(expected[i], consecutive_distinct_primes(n, k))
def test_HR_problem_k4(self):
k = 4
nums = [10_000, 100_000, 300_000]
expected = [[], [], [134_043, 238_203, 253_894, 259_368]]
for i, n in enumerate(nums):
self.assertEqual(expected[i], consecutive_distinct_primes(n, k))
def test_PE_problem(self):
expected = 134_043
self.assertEqual(expected, first_4_distinct_primes())
if __name__ == '__main__':
unittest.main()
|
bog-walk/project-euler-python
|
test/batch4/test_problem47.py
|
test_problem47.py
|
py
| 1,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5906135972
|
#!/usr/bin/env python
import rospy
import math
import sys
import tf
from el2425_bitcraze.srv import SetTargetPosition
from el2425_bitcraze.srv import SetPolygonTrajectory
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Point
rate = 5
deltaTheta = 20
class PolygonTrajectoryPlanner:
def __init__(self):
        self.rate = rospy.Rate(rate)  # 5 Hz
self.goal = [0, 0, 0]
rospy.Subscriber('goal', PoseStamped, self.cfGoalCallback)
self.setTargetPosition = rospy.ServiceProxy('set_target_position', SetTargetPosition)
rospy.Service('set_polygon_trajectory', SetPolygonTrajectory, self.polygonTrajectoryCallback)
self.planTrajectory = False
self.onCircle = False
def polygonTrajectoryCallback(self, req):
self.center = req.center
self.r = req.r
self.xrotation = req.xrotation
self.onCircle = False
self.theta = 0
if not self.planTrajectory:
self.planTrajectory = True
return()
def run(self):
self.theta = 0
while not rospy.is_shutdown():
if self.planTrajectory:
if self.isOnCircle():
self.onCircle = True
theta_rad = math.pi*self.theta/180
x_new = self.center[0] + self.r*math.cos(theta_rad)
y_new = self.center[1] + self.r*math.sin(theta_rad)*math.cos(self.xrotation*math.pi/180)
z_new = self.center[2] - self.r*math.sin(theta_rad)*math.sin(self.xrotation*math.pi/180)
                # step / increment of deltaTheta degrees (20 here);
                # this step decides the shape of the circle:
                # if we change it to 90 degrees the trajectory becomes a square
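                # with deltaTheta = 20 the circle is approximated by
                # 360 / 20 = 18 waypoints per revolution; with 90 it would be
                # only 4 waypoints (a square).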
distToTarget = math.sqrt(math.pow(self.goal[0]-x_new,2) + math.pow(self.goal[1]-y_new,2) + math.pow(self.goal[2]-z_new,2))
if distToTarget <= 0.1:
self.theta = self.theta + deltaTheta #10
#print "Point: %f" %(self.point.x)
if self.theta >= 360:
self.theta = 0
else:
x_new = self.center[0] + self.r
y_new = self.center[1]
z_new = self.center[2]
self.setTargetPosition(x_new, y_new, z_new)
self.rate.sleep()
def isOnCircle(self):
if self.onCircle:
return True
else:
x = self.goal[0]
y = self.goal[1]
z = self.goal[2]
x0 = self.center[0] + self.r
y0 = self.center[1]
z0 = self.center[2]
dist = math.sqrt(math.pow(x-x0,2) + math.pow(y-y0,2) + math.pow(z-z0,2))
return dist <= 0.1
def cfGoalCallback(self, goal):
self.goal = [goal.pose.position.x, goal.pose.position.y, goal.pose.position.z]
if __name__ == "__main__":
rospy.init_node("trajectory_handler")
trajHandler = PolygonTrajectoryPlanner()
trajHandler.run()
|
SabirNY/el2425_bitcraze
|
scripts/trajectory_handler.py
|
trajectory_handler.py
|
py
| 3,126 |
python
|
en
|
code
| null |
github-code
|
6
|
21402626105
|
import numpy as np
import tensorflow as tf
from pyTasks.task import Task, Parameter
from pyTasks.task import Optional, containerHash
from pyTasks.target import CachedTarget, LocalTarget
from pyTasks.target import JsonService, FileTarget
from .gram_tasks import PrepareKernelTask
import logging
import math
from time import time
import os
from tensorflow.contrib.tensorboard.plugins import projector
from scipy.spatial.distance import cdist
from .graph_tasks import EdgeType
class WVSkipgram(object):
def __init__(self, num_words, learning_rate, embedding_size,
num_steps, neg_sampling, unigrams, log="./log/"):
self.num_words = num_words
self.learning_rate = learning_rate
self.embedding_size = embedding_size
self.num_steps = num_steps
self.neg_sampling = neg_sampling
self.unigrams = unigrams
self.log_dir = log
self.graph, self.batch_inputs, self.batch_labels,self.normalized_embeddings,\
self.loss, self.optimizer = self.trainer_initial()
def trainer_initial(self):
graph = tf.Graph()
with graph.as_default():
# logging
self.logger = tf.summary.FileWriter(self.log_dir)
with tf.name_scope("embedding"):
batch_inputs = tf.placeholder(tf.int64, shape=([None, ]))
batch_labels = tf.placeholder(tf.int64, shape=([None, 1]))
graph_embeddings = tf.Variable(
tf.random_uniform([self.num_words, self.embedding_size], -0.5 / self.embedding_size, 0.5/self.embedding_size),
name='word_embedding')
                batch_graph_embeddings = tf.nn.embedding_lookup(graph_embeddings, batch_inputs)  # hidden layer
weights = tf.Variable(tf.truncated_normal([self.num_words, self.embedding_size],
stddev=1.0 / math.sqrt(self.embedding_size))) #output layer wt
biases = tf.Variable(tf.zeros(self.num_words)) #output layer biases
#negative sampling part
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=weights,
biases=biases,
labels=batch_labels,
inputs=batch_graph_embeddings,
num_sampled=self.neg_sampling,
num_classes=self.num_words,
sampled_values=tf.nn.fixed_unigram_candidate_sampler(
true_classes=batch_labels,
num_true=1,
num_sampled=self.neg_sampling,
unique=True,
range_max=self.num_words,
distortion=0.75,
unigrams=self.unigrams)#word_id_freq_map_as_list is the
# frequency of each word in vocabulary
))
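                # NCE approximates the full softmax: each true (input, label)
                # pair is contrasted against `neg_sampling` negatives drawn from
                # the unigram distribution raised to the 0.75 power (distortion).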
norm = tf.sqrt(tf.reduce_mean(tf.square(graph_embeddings), 1, keep_dims=True))
normalized_embeddings = graph_embeddings/norm
# summary
tf.summary.histogram("weights", weights)
tf.summary.histogram("biases", biases)
tf.summary.scalar("loss", loss)
config = projector.ProjectorConfig()
emb = config.embeddings.add()
emb.tensor_name = normalized_embeddings.name
emb.metadata_path = os.path.join(self.log_dir, 'vocab.tsv')
projector.visualize_embeddings(self.logger, config)
with tf.name_scope('descent'):
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(self.learning_rate,
global_step, 100000, 0.96, staircase=True) #linear decay over time
learning_rate = tf.maximum(learning_rate,0.001) #cannot go below 0.001 to ensure at least a minimal learning
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
self.logger.add_graph(graph)
return graph, batch_inputs, batch_labels, normalized_embeddings, loss, optimizer
def train(self, dataset):
with tf.Session(graph=self.graph,
config=tf.ConfigProto(log_device_placement=True,allow_soft_placement=False)) as sess:
merged_summary = tf.summary.merge_all()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
sess.run(init)
sess.run(tf.tables_initializer())
step = 0
for i in range(self.num_steps):
t0 = time()
feed_it = dataset.make_initializable_iterator()
next_element = feed_it.get_next()
sess.run(feed_it.initializer)
while True:
try:
feed_dict = sess.run([next_element])
feed_dict = {self.batch_inputs: feed_dict[0][0],
self.batch_labels:
sess.run(
tf.reshape(feed_dict[0][1], [-1, 1])
)
}
loss_val = 0
_, loss_val = sess.run([self.optimizer, self.loss], feed_dict=feed_dict)
if step % 10 == 0:
s = sess.run(merged_summary, feed_dict=feed_dict)
self.logger.add_summary(s, step)
if step % 1000 == 0:
saver.save(sess, os.path.join(self.log_dir, "model.ckpt"), step)
step += 1
except tf.errors.OutOfRangeError:
break
epoch_time = time() - t0
loss = 0
#done with training
final_embeddings = self.normalized_embeddings.eval()
return final_embeddings
def collect_ast(G, nodes):
stack = []
stack.extend(nodes)
out = []
while len(stack) > 0:
act = stack.pop()
out.append(act)
for in_node, _, _, d in G.in_edges(act, keys=True, data='type'):
if d is EdgeType.se:
stack.append(in_node)
return out
def is_ast_node(G, node):
ast_node = True
for out_node, _, _, d in G.out_edges(node, keys=True, data='type'):
ast_node &= d is EdgeType.se
for out_node, _, _, d in G.in_edges(node, keys=True, data='type'):
ast_node &= d is EdgeType.se
return ast_node
class WVGraphSentenceTask(Task):
out_dir = Parameter('./w2v/sentences/')
def __init__(self, name, h, D):
self.name = name
self.h = h
self.D = D
def require(self):
return PrepareKernelTask(self.name, self.h, self.D)
def output(self):
path = self.out_dir.value + self.__taskid__() + '.txt'
return FileTarget(path)
def __taskid__(self):
return 'W2VGraphSentence_%s_%d_%d' % (self.name, self.h, self.D)
def run(self):
with self.input()[0] as i:
G = i.query()
L = []
with self.output() as output:
for node in G:
in_nodes = []
ast_nodes = []
for in_node, _, _, d in G.in_edges(node, keys=True, data='type'):
if d is EdgeType.se:
ast_nodes.append(in_node)
elif d is EdgeType.de:
in_nodes.append(in_node)
in_nodes.extend(collect_ast(G, ast_nodes))
if len(in_nodes) == 0:
continue
in_nodes = [G.node[n]['label'] for n in in_nodes]
output.write(
str(G.node[node]['label']) + ' ' + ' '.join(in_nodes)+'\n'
)
class WVVocabulary(Task):
out_dir = Parameter('./w2v/')
def __init__(self, graph_list, length, h, D):
self.graph_list = graph_list
self.h = h
self.D = D
self.length = length
def require(self):
return [
WVGraphSentenceTask(
name,
self.h,
self.D
)
for name in self.graph_list
]
def output(self):
path = self.out_dir.value + self.__taskid__() + '.json'
return CachedTarget(
LocalTarget(path, service=JsonService)
)
def __taskid__(self):
return 'W2VVocabulary_%d_%d_%d' % (self.h, self.D,
containerHash(self.graph_list))
def run(self):
vocab = {}
overall = 0
for inp in self.input():
with inp as i:
for line in i.readlines():
for w in line.split():
if w not in vocab:
vocab[w] = 0
vocab[w] += 1
overall += 1
vocab = [x for x in sorted(
list(vocab.items()), key=lambda x: x[1], reverse=True
)][:self.length]
vocab = {k[0]: (v, k[1]) for v, k in enumerate(vocab)}
print('### Parsed %s samples ###' % overall)
with self.output() as o:
o.emit(vocab)
class WVEmbeddingTask(Task):
out_dir = Parameter('./w2v/')
embedding_size = Parameter(10)
learning_rate = Parameter(0.001)
num_steps = Parameter(3)
neg_sampling = Parameter(15)
batch_size = Parameter(100)
log_dir = Parameter('./log/embedded/')
def __init__(self, graph_list, length, h, D):
self.graph_list = graph_list
self.h = h
self.D = D
self.length = length
def require(self):
out = [WVVocabulary(self.graph_list, self.length, self.h, self.D)]
out.extend([
WVGraphSentenceTask(
name,
self.h,
self.D
)
for name in self.graph_list
])
return out
def output(self):
path = self.out_dir.value + self.__taskid__() + '.json'
return CachedTarget(
LocalTarget(path, service=JsonService)
)
def __taskid__(self):
return 'W2VEmbeddingTask_%d_%d_%d' % (self.h, self.D,
containerHash(self.graph_list))
def _get_vocab(self, vocab):
vocab = [x[0] for x in
sorted(list(vocab.items()),
key=lambda v: v[1][0])]
with open(os.path.join(self.log_dir.value, 'vocab.tsv'), 'w') as o:
for v in vocab:
o.write(v+'\n')
return vocab
def run(self):
with self.input()[0] as i:
vocab = i.query()
inp = (self.input()[i] for i in range(1, len(self.input())))
filenames = [f.sandBox + f.path for f in inp]
unigrams = [x[1][1] for x in
sorted(list(vocab.items()),
key=lambda v: v[1][0])]
model_skipgram = WVSkipgram(
len(vocab),
self.learning_rate.value,
self.embedding_size.value,
self.num_steps.value,
self.neg_sampling.value,
unigrams,
self.log_dir.value
)
with tf.Session(graph=model_skipgram.graph,
config=tf.ConfigProto(log_device_placement=True,allow_soft_placement=False)) as sess:
vocab_mapping = tf.constant(self._get_vocab(vocab))
table = tf.contrib.lookup.index_table_from_tensor(
mapping=vocab_mapping, num_oov_buckets=1,
default_value=-1)
def parse_mapping(line):
line = tf.string_split([line], ' ').values
line = table.lookup(line)
label = line[0:1]
features = line[1:]
return features, tf.tile(label, [tf.shape(features)[0]])
dataset = tf.data.TextLineDataset(filenames)
dataset = dataset.map(parse_mapping)
dataset = dataset.flat_map(lambda features, labels:
tf.data.Dataset().zip((
tf.data.Dataset().from_tensor_slices(features),
tf.data.Dataset().from_tensor_slices(labels))
))
dataset = dataset.shuffle(1000).batch(self.batch_size.value)
embedding = model_skipgram.train(dataset)
with self.output() as o:
o.emit(embedding.tolist())
class WVSimilarWords(Task):
out_dir = Parameter('./w2v/')
def __init__(self, graph_list, length, h, D):
self.graph_list = graph_list
self.h = h
self.D = D
self.length = length
def require(self):
out = [WVVocabulary(self.graph_list, self.length, self.h, self.D),
WVEmbeddingTask(self.graph_list, self.length,
self.h, self.D)]
return out
def output(self):
path = self.out_dir.value + self.__taskid__() + '.json'
return CachedTarget(
LocalTarget(path, service=JsonService)
)
def __taskid__(self):
return 'W2VSimilarWords_%d_%d_%d' % (self.h, self.D,
containerHash(self.graph_list))
def run(self):
with self.input()[0] as i:
vocab = i.query()
with self.input()[1] as i:
embedding = np.array(i.query())
inv_vocab = [None]*len(vocab)
for k, v in vocab.items():
inv_vocab[v[0]] = k
inv_vocab = inv_vocab
dis = cdist(embedding, embedding, 'cosine')
arg_sort = np.argsort(dis, axis=1)[:, 1:6]
near = {}
for i, k in enumerate(inv_vocab):
row = arg_sort[i]
near[k] = []
for j in range(row.shape[0]):
near[k].append([inv_vocab[row[j]], 1-dis[i, j]])
with self.output() as o:
o.emit(near)
|
cedricrupb/pySVRanker
|
word2vec_tasks.py
|
word2vec_tasks.py
|
py
| 14,557 |
python
|
en
|
code
| 2 |
github-code
|
6
|
44112547600
|
from tkinter import *
root = Tk()
root.title("Calculator")
entry_box = Entry(root,width=35,font=('Century Schoolbook', 12))
entry_box.grid(row=0,column=0,columnspan=4,padx=10,pady=20,ipady=5)
answer = 0
val1=""
operands=['+','-','*','/','=']
def button_click(number):
current = entry_box.get()
if number in operands:
        if current and current[-1] not in operands:
entry_box.delete(0,END)
entry_box.insert(0,str(current)+str(number))
elif number == '.':
flag = 1
l = len(current)
i = -1
while(i>=-l):
if current[i] == '.':
flag = 0
break
elif current[i] in operands:
flag = 1
break
i=i-1
if(flag == 1):
entry_box.delete(0,END)
entry_box.insert(0,str(current)+str(number))
else:
entry_box.delete(0,END)
entry_box.insert(0,str(current)+str(number))
def button_clear():
entry_box.delete(0,END)
#def operations(op):
def button_equal():
current = entry_box.get()
    if current and current[-1] not in operands:
entry_box.delete(0,END)
entry_box.insert(0,str(current)+"=")
current = entry_box.get()
op = '+'
val1=""
answer=0.00
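    # The expression is evaluated strictly left to right, ignoring operator
    # precedence: e.g. "2+3*4=" yields 20.0 rather than 14.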
for i in range(len(current)):
if current[i] in operands:
if op == '+':
answer = answer + float(val1)
if op == '-':
answer = answer - float(val1)
if op == '*':
answer = answer * float(val1)
if op == '/':
answer = answer / float(val1)
val1=""
op = current[i]
else:
val1 = val1 + current[i]
i=i+1
entry_box.delete(0,END)
entry_box.insert(0,answer)
def button_back():
current=entry_box.get()
entry_box.delete(0,END)
entry_box.insert(0,current[0:-1])
#def operations:
back_button = Button(root,text="←",width=20,height=2,command=button_back)
back_button.grid(row=1,column=0,columnspan=2,padx=5,pady=5)
clear_button = Button(root,text="Clear",width=20,height=2,command=button_clear)
clear_button.grid(row=1,column=2,columnspan=2,padx=5,pady=5)
button_7 = Button(root,text="7",width=7,height=2,command=lambda:button_click(7))
button_7.grid(row=2,column=0,padx=5,pady=5)
button_8 = Button(root,text="8",width=7,height=2,command=lambda:button_click(8))
button_8.grid(row=2,column=1,padx=5,pady=5)
button_9 = Button(root,text="9",width=7,height=2,command=lambda:button_click(9))
button_9.grid(row=2,column=2,padx=5,pady=5)
button_add = Button(root,text="+",width=7,height=2,command=lambda:button_click('+'))
button_add.grid(row=2,column=3,padx=5,pady=5)
button_4 = Button(root,text="4",width=7,height=2,command=lambda:button_click(4))
button_4.grid(row=3,column=0,padx=5,pady=5)
button_5 = Button(root,text="5",width=7,height=2,command=lambda:button_click(5))
button_5.grid(row=3,column=1,padx=5,pady=5)
button_6 = Button(root,text="6",width=7,height=2,command=lambda:button_click(6))
button_6.grid(row=3,column=2,padx=5,pady=5)
button_sub = Button(root,text="-",width=7,height=2,command=lambda:button_click('-'))
button_sub.grid(row=3,column=3,padx=5,pady=5)
button_1 = Button(root,text="1",width=7,height=2,command=lambda:button_click(1))
button_1.grid(row=4,column=0,padx=5,pady=5)
button_2 = Button(root,text="2",width=7,height=2,command=lambda:button_click(2))
button_2.grid(row=4,column=1,padx=5,pady=5)
button_3 = Button(root,text="3",width=7,height=2,command=lambda:button_click(3))
button_3.grid(row=4,column=2,padx=5,pady=5)
button_mul = Button(root,text="*",width=7,height=2,command=lambda:button_click('*'))
button_mul.grid(row=4,column=3,padx=5,pady=5)
button_0 = Button(root,text="0",width=20,height=2,command=lambda:button_click(0))
button_0.grid(row=5,column=0,columnspan=2,padx=5,pady=5)
button_dec = Button(root,text=".",width=7,height=2,command=lambda:button_click("."))
button_dec.grid(row=5,column=2,padx=5,pady=5)
button_div = Button(root,text="/",width=7,height=2,command=lambda:button_click('/'))
button_div.grid(row=5,column=3,padx=5,pady=5)
button_equal = Button(root,text="=",width=43,height=2,command=button_equal)
button_equal.grid(row=6,column=0,columnspan=4,padx=5,pady=5)
root.mainloop()
|
shaharyar797/tkinter
|
Calculator.py
|
Calculator.py
|
py
| 4,607 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25566549731
|
from django.shortcuts import render
from .models import Product
from .forms import ProductForm
from django.http import HttpResponse
def list(request):
products = Product.objects.all()
context = {'products': products}
return render(request, 'product/list.html', context)
def save_product(request):
if(request.method == 'POST'):
product = ProductForm(request.POST)
        if product.is_valid():
product.save()
products = Product.objects.all()
context = {'products': products, 'product': products}
return render(request, 'product/list.html', context)
else:
return render(request, 'layout/create.html', {'product': product})
def create(request):
form = ProductForm()
context = {'form': form}
return render(request, 'product/create.html', context)
def delete(request, id):
if(request.method == 'GET'):
product = Product.objects.get(id=id)
product.delete()
products = Product.objects.all()
context = {'products': products}
return render(request, 'product/list.html', context)
def update(request, id):
product = Product.objects.get(id=id)
if(request.method == 'GET'):
form = ProductForm(instance=product)
context = {'form': form, 'id': id}
return render(request, 'product/update.html', context)
if(request.method == 'POST'):
form = ProductForm(request.POST, instance=product)
if form.is_valid():
form.save()
products = Product.objects.all()
context = {'products': products}
return render(request, 'product/list.html', context)
|
d3stroya/ob-django
|
wishlist/product/views.py
|
views.py
|
py
| 1,700 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28492207070
|
import librosa,librosa.display
import matplotlib.pyplot as plt
import numpy as np
file="your-summer-day-5448.wav"
#waveform
signal,sr=librosa.load(file,sr=22050) # signal is a numpy array with sr * duration samples
librosa.display.waveplot(signal,sr=sr) #visualizing the wave
plt.xlabel("Time")
plt.ylabel("Amplitude")
plt.show()
# time domain -> frequency domain (Fourier transform)
fft=np.fft.fft(signal) #np array
magnitude= np.abs(fft) #indicates contrib of each frequency to the sound
frequency=np.linspace(0,sr,len(magnitude))
left_frequency=frequency[:int(len(frequency)/2)]
left_magnitude=magnitude[:int(len(magnitude)/2)]
plt.plot(left_frequency,left_magnitude)
plt.xlabel("Frequency")
plt.ylabel("Magnitude")
plt.show()
# get spectrogram (amplitude as a function of frequency and time)
n_fft=2048 #no.of sample in each fft
hop_length=512 #amount of shift to next fft to the right
stft=librosa.core.stft(signal,hop_length=hop_length,n_fft=n_fft)
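# stft has shape (1 + n_fft/2, n_frames): 1025 frequency bins here and roughly
# len(signal)/hop_length frames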
spectrogram=np.abs(stft)
log_spectrogram=librosa.amplitude_to_db(spectrogram) #converting amplitude to decibel
librosa.display.specshow(log_spectrogram,sr=sr,hop_length=hop_length) # specshow helps to visualize spectrogram-like data (x axis, y axis and color label)
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.colorbar() #amplitude will be displayed by color
plt.show()
#mfccs
MFCCS=librosa.feature.mfcc(signal,n_fft=n_fft,hop_length=hop_length,n_mfcc=13)
librosa.display.specshow(MFCCS,sr=sr,hop_length=hop_length) # specshow helps to visualize spectrogram-like data (x axis, y axis and color label)
plt.xlabel("Time")
plt.ylabel("MFCC")
plt.colorbar() #amplitude will be displayed by color
plt.show()
|
yashi4001/ML_Basics
|
audio_preprocess.py
|
audio_preprocess.py
|
py
| 1,735 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12397412517
|
import sys
import re
import os
# nagios exit code
STATUS_OK = 0
STATUS_WARNING = 1
STATUS_ERROR = 2
STATUS_UNKNOWN = 3
def main():
try:
with open('/proc/drbd') as f:
for line in f:
                match = re.search(r'^\ *(.*): cs:(.*) ro:([^\ ]*) ds:([^\ ]*) .*$', line)
if match:
id = match.group(1)
cs = match.group(2)
ro = match.group(3)
ds = match.group(4)
res = id
for root, dirs, files in os.walk('/dev/drbd/by-res/'):
for f in files:
if f == id:
res = os.path.basename(root)
error_msg='DRBD %s (drbd%s) state is %s (ro:%s, ds:%s)' % (res, id, cs, ro, ds)
if cs not in ('Connected', 'SyncSource', 'SyncTarget') or 'Unknown' in ro or 'Unknown' in ds:
print(error_msg)
return STATUS_ERROR
if cs == 'SyncTarget':
print(error_msg)
return STATUS_WARNING
        print('DRBD is OK')
return STATUS_OK
except IOError as e:
print(e)
return STATUS_UNKNOWN
if __name__ == "__main__":
sys.exit(main())
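# For reference, an illustrative /proc/drbd resource line the regex above is meant
# to match (values vary per cluster):
#  0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----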
|
Sysnove/shinken-plugins
|
check_drbd.py
|
check_drbd.py
|
py
| 1,342 |
python
|
en
|
code
| 9 |
github-code
|
6
|
17651189647
|
import telebot
from config import TOKEN, keys
from extensions import ExchangeException, Exchange
bot = telebot.TeleBot(TOKEN)
# Handle the /start command
@bot.message_handler(commands=['start'])
def start(message):
start = "Привет! Я бот, который может вернуть цену на определенное количество валюты.\n\n" \
"Пример использования: <имя валюты, цену которой вы хотите узнать> " \
"<имя валюты, в которой нужно узнать цену первой валюты> <количество первой валюты>\n\n" \
"Команды:\n" \
"/start - выводит инструкции по применению бота\n" \
"/help - выводит список команд бота\n" \
"/values - выводит информацию о всех доступных валютах\n\n" \
"Пример запроса: Рубль доллар 100"
bot.reply_to(message, start)
# Handle the /help command
@bot.message_handler(commands=['help'])
def help(message):
help = "/start - выводит инструкции по применению бота\n" \
"/help - выводит список команд бота\n" \
"/values - выводит информацию о всех доступных валютах\n\n" \
"Регистр значения не имеет.\n\n" \
"Пример запроса: Рубль доллар 100"
bot.reply_to(message,help)
# Handle the /values command
@bot.message_handler(commands=['values'])
def values(message: telebot.types.Message):
text = 'Доступные валюты:'
for key in keys.keys():
text = '\n'.join((text, key,))
bot.reply_to(message, text)
# Handle text messages from the user
@bot.message_handler(content_types=['text'])
def get_price(message: telebot.types.Message):
try:
        values = message.text.lower().split(' ')  # convert to lowercase and split into three parts
if len(values) != 3:
raise ExchangeException('Введите команду или 3 параметра')
quote, base, amount = values
total_base = Exchange.get_price(quote, base, amount)
except ExchangeException as e:
bot.reply_to(message, f'Ошибка пользователя.\n{e}')
except Exception as e:
bot.reply_to(message, f'Что-то пошло не так с {e}')
else:
text = f'Переводим {quote} в {base}\n{amount} {quote} = {total_base} {base}'
bot.send_message(message.chat.id, text)
bot.polling()
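# The Exchange/ExchangeException helpers come from extensions.py, which is not shown
# here. A minimal sketch of what they might look like (hypothetical; the real project
# may use a different rate API and key mapping):
#
# import requests
# from config import keys
#
# class ExchangeException(Exception):
#     pass
#
# class Exchange:
#     @staticmethod
#     def get_price(quote, base, amount):
#         if quote not in keys or base not in keys:
#             raise ExchangeException('Unknown currency')
#         if quote == base:
#             raise ExchangeException('Identical currencies')
#         try:
#             amount = float(amount)
#         except ValueError:
#             raise ExchangeException('Amount must be a number')
#         r = requests.get('https://min-api.cryptocompare.com/data/price'
#                          f'?fsym={keys[quote]}&tsyms={keys[base]}')
#         return r.json()[keys[base]] * amount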
|
Airton99999/telegram_bot_convertor
|
bot.py
|
bot.py
|
py
| 2,989 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
34336638325
|
# main.py
from project.scraper import Scraper
from project.database import Database
def main():
"""
main function
"""
#Initialize the scraper
scraper = Scraper()
#Get the data
response = scraper.scrape_data()
data = scraper.parse_response(response)
#Connect to the database
db = Database()
db.connect()
#Store the data
db.store_data(data)
#Close connection
db.close()
if __name__ == "__main__":
main()
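# The Scraper and Database classes are imported from the project package and are not
# shown here. A minimal sketch of the interface main() relies on (hypothetical):
#
# class Scraper:
#     def scrape_data(self): ...              # fetch raw pages / API responses
#     def parse_response(self, response): ... # turn the response into records
#
# class Database:
#     def connect(self): ...
#     def store_data(self, data): ...
#     def close(self): ...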
|
MiriamGeek/scraper-pipeline
|
project/__main__.py
|
__main__.py
|
py
| 480 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9185132950
|
import collections
from collections import abc
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import threading
import time
import timeit
import traceback
import types
import warnings
from absl import flags
from absl.logging import converter
try:
from typing import NoReturn
except ImportError:
pass
FLAGS = flags.FLAGS
# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG
# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
r'^(?P<severity>[IWEF])'
r'(?P<month>\d\d)(?P<day>\d\d) '
r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
r'\.(?P<microsecond>\d\d\d\d\d\d) +'
r'(?P<thread_id>-?\d+) '
r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')
# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1
# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '
# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')
# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None
_CPP_NAME_TO_LEVELS = {
'debug': '0', # Abseil C++ has no DEBUG level, mapping it to INFO here.
'info': '0',
'warning': '1',
'warn': '1',
'error': '2',
'fatal': '3'
}
_CPP_LEVEL_TO_NAMES = {
'0': 'info',
'1': 'warning',
'2': 'error',
'3': 'fatal',
}
class _VerbosityFlag(flags.Flag):
"""Flag class for -v/--verbosity."""
def __init__(self, *args, **kwargs):
super(_VerbosityFlag, self).__init__(
flags.IntegerParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v
self._update_logging_levels()
def _update_logging_levels(self):
"""Updates absl logging levels to the current verbosity.
Visibility: module-private
"""
if not _absl_logger:
return
if self._value <= converter.ABSL_DEBUG:
standard_verbosity = converter.absl_to_standard(self._value)
else:
# --verbosity is set to higher than 1 for vlog.
standard_verbosity = logging.DEBUG - (self._value - 1)
# Also update root level when absl_handler is used.
if _absl_handler in logging.root.handlers:
# Make absl logger inherit from the root logger. absl logger might have
# a non-NOTSET value if logging.set_verbosity() is called at import time.
_absl_logger.setLevel(logging.NOTSET)
logging.root.setLevel(standard_verbosity)
else:
_absl_logger.setLevel(standard_verbosity)
class _LoggerLevelsFlag(flags.Flag):
"""Flag class for --logger_levels."""
def __init__(self, *args, **kwargs):
super(_LoggerLevelsFlag, self).__init__(
_LoggerLevelsParser(),
_LoggerLevelsSerializer(),
*args, **kwargs)
@property
def value(self):
# For lack of an immutable type, be defensive and return a copy.
    # Modifications to the dict aren't supported and won't have any effect.
# While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
# just return a copy.
return self._value.copy()
@value.setter
def value(self, v):
self._value = {} if v is None else v
self._update_logger_levels()
def _update_logger_levels(self):
# Visibility: module-private.
# This is called by absl.app.run() during initialization.
for name, level in self._value.items():
logging.getLogger(name).setLevel(level)
class _LoggerLevelsParser(flags.ArgumentParser):
"""Parser for --logger_levels flag."""
def parse(self, value):
if isinstance(value, abc.Mapping):
return value
pairs = [pair.strip() for pair in value.split(',') if pair.strip()]
# Preserve the order so that serialization is deterministic.
levels = collections.OrderedDict()
for name_level in pairs:
name, level = name_level.split(':', 1)
name = name.strip()
level = level.strip()
levels[name] = level
return levels
class _LoggerLevelsSerializer(object):
"""Serializer for --logger_levels flag."""
def serialize(self, value):
if isinstance(value, str):
return value
return ','.join(
'{}:{}'.format(name, level) for name, level in value.items())
class _StderrthresholdFlag(flags.Flag):
"""Flag class for --stderrthreshold."""
def __init__(self, *args, **kwargs):
super(_StderrthresholdFlag, self).__init__(
flags.ArgumentParser(),
flags.ArgumentSerializer(),
*args, **kwargs)
@property
def value(self):
return self._value
@value.setter
def value(self, v):
if v in _CPP_LEVEL_TO_NAMES:
# --stderrthreshold also accepts numeric strings whose values are
# Abseil C++ log levels.
cpp_value = int(v)
v = _CPP_LEVEL_TO_NAMES[v] # Normalize to strings.
elif v.lower() in _CPP_NAME_TO_LEVELS:
v = v.lower()
if v == 'warn':
v = 'warning' # Use 'warning' as the canonical name.
cpp_value = int(_CPP_NAME_TO_LEVELS[v])
else:
raise ValueError(
'--stderrthreshold must be one of (case-insensitive) '
"'debug', 'info', 'warning', 'error', 'fatal', "
"or '0', '1', '2', '3', not '%s'" % v)
self._value = v
flags.DEFINE_boolean('logtostderr',
False,
'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
False,
'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
os.getenv('TEST_TMPDIR', ''),
'directory to write logfiles into',
allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
'verbosity', -1,
'Logging verbosity level. Messages logged at this level or lower will '
'be included. Set to 1 for debug logging. If the flag was not set or '
'supplied, the value will be changed from the default of -1 (warning) to '
'0 (info) after flags are parsed.',
short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(
_LoggerLevelsFlag(
'logger_levels', {},
'Specify log level of loggers. The format is a CSV list of '
'`name:level`. Where `name` is the logger name used with '
'`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, '
'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`'))
flags.DEFINE_flag(_StderrthresholdFlag(
'stderrthreshold', 'fatal',
'log messages at this level, or more severe, to stderr in '
'addition to the logfile. Possible values are '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'Obsoletes --alsologtostderr. Using --alsologtostderr '
'cancels the effect of this flag. Please also note that '
'this flag is subject to --verbosity and requires logfile '
'not be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
'If False, do not prepend prefix to info messages '
'when it\'s logged to stderr, '
'--verbosity is set to INFO level, '
'and python logging is used.')
def get_verbosity():
"""Returns the logging verbosity."""
return FLAGS['verbosity'].value
def set_verbosity(v):
"""Sets the logging verbosity.
Causes all messages of level <= v to be logged,
and all messages of level > v to be silently discarded.
Args:
v: int|str, the verbosity level as an integer or string. Legal string values
are those that can be coerced to an integer as well as case-insensitive
'debug', 'info', 'warning', 'error', and 'fatal'.
"""
try:
new_level = int(v)
except ValueError:
new_level = converter.ABSL_NAMES[v.upper()]
FLAGS.verbosity = new_level
def set_stderrthreshold(s):
"""Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
"""
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s)))
def fatal(msg, *args, **kwargs):
# type: (Any, Any, Any) -> NoReturn
"""Logs a fatal message."""
log(FATAL, msg, *args, **kwargs)
def error(msg, *args, **kwargs):
"""Logs an error message."""
log(ERROR, msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""Logs a warning message."""
log(WARNING, msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
"""Deprecated, use 'warning' instead."""
warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
DeprecationWarning, 2)
log(WARNING, msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""Logs an info message."""
log(INFO, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""Logs a debug message."""
log(DEBUG, msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""Logs an exception, with traceback and message."""
error(msg, *args, **kwargs, exc_info=True)
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}
def _get_next_log_count_per_token(token):
"""Wrapper for _log_counter_per_token. Thread-safe.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0).
"""
# Can't use a defaultdict because defaultdict isn't atomic, whereas
# setdefault is.
return next(_log_counter_per_token.setdefault(token, itertools.count()))
def log_every_n(level, msg, n, *args):
"""Logs ``msg % args`` at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the number of times this should be called before it is logged.
*args: The args to be substituted into the msg.
"""
count = _get_next_log_count_per_token(get_absl_logger().findCaller())
log_if(level, msg, not (count % n), *args)
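# Illustrative usage (not part of the original module): calling
#   logging.log_every_n(logging.INFO, 'processed %d items', 100, i)
# from the same call site emits on the 1st, 101st, 201st, ... invocation.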
# Keeps track of the last log time of the given token.
# Note: must be a dict since set/get is atomic in CPython.
# Note: entries are never released as their number is expected to be low.
_log_timer_per_token = {}
def _seconds_have_elapsed(token, num_seconds):
"""Tests if 'num_seconds' have passed since 'token' was requested.
Not strictly thread-safe - may log with the wrong frequency if called
concurrently from multiple threads. Accuracy depends on resolution of
'timeit.default_timer()'.
Always returns True on the first call for a given 'token'.
Args:
token: The token for which to look up the count.
num_seconds: The number of seconds to test for.
Returns:
Whether it has been >= 'num_seconds' since 'token' was last requested.
"""
now = timeit.default_timer()
then = _log_timer_per_token.get(token, None)
if then is None or (now - then) >= num_seconds:
_log_timer_per_token[token] = now
return True
else:
return False
def log_every_n_seconds(level, msg, n_seconds, *args):
"""Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call.
Logs the first call, logs subsequent calls if 'n' seconds have elapsed since
the last logging call from the same call site (file + line). Not thread-safe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n_seconds: float or int, seconds which should elapse before logging again.
*args: The args to be substituted into the msg.
"""
should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)
log_if(level, msg, should_log, *args)
def log_first_n(level, msg, n, *args):
"""Logs ``msg % args`` at level ``level`` only first ``n`` times.
Not threadsafe.
Args:
level: int, the absl logging level at which to log.
msg: str, the message to be logged.
n: int, the maximal number of times the message is logged.
*args: The args to be substituted into the msg.
"""
count = _get_next_log_count_per_token(get_absl_logger().findCaller())
log_if(level, msg, count < n, *args)
def log_if(level, msg, condition, *args):
"""Logs ``msg % args`` at level ``level`` only if condition is fulfilled."""
if condition:
log(level, msg, *args)
def log(level, msg, *args, **kwargs):
"""Logs ``msg % args`` at absl logging level ``level``.
If no args are given just print msg, ignoring any interpolation specifiers.
Args:
level: int, the absl logging level at which to log the message
(logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
level constants are also supported, callers should prefer explicit
logging.vlog() calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
"""
if level > converter.ABSL_DEBUG:
# Even though this function supports level that is greater than 1, users
# should use logging.vlog instead for such cases.
# Treat this as vlog, 1 is equivalent to DEBUG.
standard_level = converter.STANDARD_DEBUG - (level - 1)
else:
if level < converter.ABSL_FATAL:
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
# Match standard logging's behavior. Before use_absl_handler() and
# logging is configured, there is no handler attached on _absl_logger nor
  # logging.root. So logs go nowhere.
if not logging.root.handlers:
logging.basicConfig()
_absl_logger.log(standard_level, msg, *args, **kwargs)
def vlog(level, msg, *args, **kwargs):
"""Log ``msg % args`` at C++ vlog level ``level``.
Args:
level: int, the C++ verbose logging level at which to log the message,
e.g. 1, 2, 3, 4... While absl level constants are also supported,
callers should prefer logging.log|debug|info|... calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
"""
log(level, msg, *args, **kwargs)
def vlog_is_on(level):
"""Checks if vlog is enabled for the given level in caller's source file.
Args:
level: int, the C++ verbose logging level at which to log the message,
e.g. 1, 2, 3, 4... While absl level constants are also supported,
callers should prefer level_debug|level_info|... calls for
checking those.
Returns:
True if logging is turned on for that level.
"""
if level > converter.ABSL_DEBUG:
# Even though this function supports level that is greater than 1, users
# should use logging.vlog instead for such cases.
# Treat this as vlog, 1 is equivalent to DEBUG.
standard_level = converter.STANDARD_DEBUG - (level - 1)
else:
if level < converter.ABSL_FATAL:
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
return _absl_logger.isEnabledFor(standard_level)
def flush():
"""Flushes all log files."""
get_absl_handler().flush()
def level_debug():
"""Returns True if debug logging is turned on."""
return get_verbosity() >= DEBUG
def level_info():
"""Returns True if info logging is turned on."""
return get_verbosity() >= INFO
def level_warning():
"""Returns True if warning logging is turned on."""
return get_verbosity() >= WARNING
level_warn = level_warning # Deprecated function.
def level_error():
"""Returns True if error logging is turned on."""
return get_verbosity() >= ERROR
def get_log_file_name(level=INFO):
"""Returns the name of the log file.
For Python logging, only one file is used and level is ignored. And it returns
empty string if it logs to stderr/stdout or the log stream has no `name`
attribute.
Args:
level: int, the absl.logging level.
Raises:
ValueError: Raised when `level` has an invalid value.
"""
if level not in converter.ABSL_LEVELS:
raise ValueError('Invalid absl.logging level {}'.format(level))
stream = get_absl_handler().python_handler.stream
if (stream == sys.stderr or stream == sys.stdout or
not hasattr(stream, 'name')):
return ''
else:
return stream.name
def find_log_dir_and_names(program_name=None, log_dir=None):
"""Computes the directory and filename prefix for log file.
Args:
program_name: str|None, the filename part of the path to the program that
is running without its extension. e.g: if your program is called
``usr/bin/foobar.py`` this method should probably be called with
``program_name='foobar`` However, this is just a convention, you can
pass in any string you want, and it will be used as part of the
log filename. If you don't pass in anything, the default behavior
is as described in the example. In python standard logging mode,
      the program_name will be prepended with ``py_`` if the
      ``program_name`` argument is omitted.
log_dir: str|None, the desired log directory.
Returns:
(log_dir, file_prefix, symlink_prefix)
Raises:
FileNotFoundError: raised in Python 3 when it cannot find a log directory.
OSError: raised in Python 2 when it cannot find a log directory.
"""
if not program_name:
# Strip the extension (foobar.par becomes foobar, and
# fubar.py becomes fubar). We do this so that the log
# file names are similar to C++ log file names.
program_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# Prepend py_ to files so that python code gets a unique file, and
# so that C++ libraries do not try to write to the same log files as us.
program_name = 'py_%s' % program_name
actual_log_dir = find_log_dir(log_dir=log_dir)
try:
username = getpass.getuser()
except KeyError:
# This can happen, e.g. when running under docker w/o passwd file.
if hasattr(os, 'getuid'):
# Windows doesn't have os.getuid
username = str(os.getuid())
else:
username = 'unknown'
hostname = socket.gethostname()
file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
return actual_log_dir, file_prefix, program_name
def find_log_dir(log_dir=None):
"""Returns the most suitable directory to put log files into.
Args:
log_dir: str|None, if specified, the logfile(s) will be created in that
directory. Otherwise if the --log_dir command-line flag is provided,
the logfile will be created in that directory. Otherwise the logfile
will be created in a standard location.
Raises:
FileNotFoundError: raised in Python 3 when it cannot find a log directory.
OSError: raised in Python 2 when it cannot find a log directory.
"""
# Get a list of possible log dirs (will try to use them in order).
if log_dir:
# log_dir was explicitly specified as an arg, so use it and it alone.
dirs = [log_dir]
elif FLAGS['log_dir'].value:
# log_dir flag was provided, so use it and it alone (this mimics the
# behavior of the same flag in logging.cc).
dirs = [FLAGS['log_dir'].value]
else:
dirs = ['/tmp/', './']
# Find the first usable log dir.
for d in dirs:
if os.path.isdir(d) and os.access(d, os.W_OK):
return d
raise FileNotFoundError(
"Can't find a writable directory for logs, tried %s" % dirs)
def get_absl_log_prefix(record):
"""Returns the absl log prefix for the log record.
Args:
record: logging.LogRecord, the record to get prefix for.
"""
created_tuple = time.localtime(record.created)
created_microsecond = int(record.created % 1.0 * 1e6)
critical_prefix = ''
level = record.levelno
if _is_non_absl_fatal_record(record):
# When the level is FATAL, but not logged from absl, lower the level so
# it's treated as ERROR.
level = logging.ERROR
critical_prefix = _CRITICAL_PREFIX
severity = converter.get_initial_for_level(level)
return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
severity,
created_tuple.tm_mon,
created_tuple.tm_mday,
created_tuple.tm_hour,
created_tuple.tm_min,
created_tuple.tm_sec,
created_microsecond,
_get_thread_id(),
record.filename,
record.lineno,
critical_prefix)
def skip_log_prefix(func):
"""Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`.
This is a convenience wrapper function / decorator for
:meth:`~absl.logging.ABSLLogger.register_frame_to_skip`.
If a callable function is provided, only that function will be skipped.
If a function name is provided, all functions with the same name in the
file that this is called in will be skipped.
This can be used as a decorator of the intended function to be skipped.
Args:
func: Callable function or its name as a string.
Returns:
func (the input, unchanged).
Raises:
ValueError: The input is callable but does not have a function code object.
TypeError: The input is neither callable nor a string.
"""
if callable(func):
func_code = getattr(func, '__code__', None)
if func_code is None:
raise ValueError('Input callable does not have a function code object.')
file_name = func_code.co_filename
func_name = func_code.co_name
func_lineno = func_code.co_firstlineno
elif isinstance(func, str):
file_name = get_absl_logger().findCaller()[0]
func_name = func
func_lineno = None
else:
raise TypeError('Input is neither callable nor a string.')
ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
return func
def _is_non_absl_fatal_record(log_record):
return (log_record.levelno >= logging.FATAL and
not log_record.__dict__.get(_ABSL_LOG_FATAL, False))
def _is_absl_fatal_record(log_record):
return (log_record.levelno >= logging.FATAL and
log_record.__dict__.get(_ABSL_LOG_FATAL, False))
# Indicates if we still need to warn about pre-init logs going to stderr.
_warn_preinit_stderr = True
class PythonHandler(logging.StreamHandler):
"""The handler class used by Abseil Python logging implementation."""
def __init__(self, stream=None, formatter=None):
super(PythonHandler, self).__init__(stream)
self.setFormatter(formatter or PythonFormatter())
def start_logging_to_file(self, program_name=None, log_dir=None):
"""Starts logging messages to files instead of standard error."""
FLAGS.logtostderr = False
actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
program_name=program_name, log_dir=log_dir)
basename = '%s.INFO.%s.%d' % (
file_prefix,
time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
os.getpid())
filename = os.path.join(actual_log_dir, basename)
self.stream = open(filename, 'a', encoding='utf-8')
# os.symlink is not available on Windows Python 2.
if getattr(os, 'symlink', None):
# Create a symlink to the log file with a canonical name.
symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
try:
if os.path.islink(symlink):
os.unlink(symlink)
os.symlink(os.path.basename(filename), symlink)
except EnvironmentError:
# If it fails, we're sad but it's no error. Commonly, this
# fails because the symlink was created by another user and so
# we can't modify it
pass
def use_absl_log_file(self, program_name=None, log_dir=None):
"""Conditionally logs to files, based on --logtostderr."""
if FLAGS['logtostderr'].value:
self.stream = sys.stderr
else:
self.start_logging_to_file(program_name=program_name, log_dir=log_dir)
def flush(self):
"""Flushes all log files."""
self.acquire()
try:
self.stream.flush()
except (EnvironmentError, ValueError):
# A ValueError is thrown if we try to flush a closed file.
pass
finally:
self.release()
def _log_to_stderr(self, record):
"""Emits the record to stderr.
This temporarily sets the handler stream to stderr, calls
StreamHandler.emit, then reverts the stream back.
Args:
record: logging.LogRecord, the record to log.
"""
# emit() is protected by a lock in logging.Handler, so we don't need to
# protect here again.
old_stream = self.stream
self.stream = sys.stderr
try:
super(PythonHandler, self).emit(record)
finally:
self.stream = old_stream
def emit(self, record):
"""Prints a record out to some streams.
1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY.
2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``.
3. If ``FLAGS.logtostderr`` is not set, it will log to the stream
associated with the current thread.
Args:
record: :class:`logging.LogRecord`, the record to emit.
"""
# People occasionally call logging functions at import time before
# our flags may have even been defined yet, let alone even parsed, as we
# rely on the C++ side to define some flags for us and app init to
# deal with parsing. Match the C++ library behavior of notify and emit
# such messages to stderr. It encourages people to clean-up and does
# not hide the message.
level = record.levelno
if not FLAGS.is_parsed(): # Also implies "before flag has been defined".
global _warn_preinit_stderr
if _warn_preinit_stderr:
sys.stderr.write(
'WARNING: Logging before flag parsing goes to stderr.\n')
_warn_preinit_stderr = False
self._log_to_stderr(record)
elif FLAGS['logtostderr'].value:
self._log_to_stderr(record)
else:
super(PythonHandler, self).emit(record)
stderr_threshold = converter.string_to_standard(
FLAGS['stderrthreshold'].value)
if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
self.stream != sys.stderr):
self._log_to_stderr(record)
# Die when the record is created from ABSLLogger and level is FATAL.
if _is_absl_fatal_record(record):
self.flush() # Flush the log before dying.
# In threaded python, sys.exit() from a non-main thread only
# exits the thread in question.
os.abort()
def close(self):
"""Closes the stream to which we are writing."""
self.acquire()
try:
self.flush()
try:
# Do not close the stream if it's sys.stderr|stdout. They may be
# redirected or overridden to files, which should be managed by users
# explicitly.
user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__
if self.stream not in user_managed and (
not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
self.stream.close()
except ValueError:
# A ValueError is thrown if we try to run isatty() on a closed file.
pass
super(PythonHandler, self).close()
finally:
self.release()
class ABSLHandler(logging.Handler):
"""Abseil Python logging module's log handler."""
def __init__(self, python_logging_formatter):
super(ABSLHandler, self).__init__()
self._python_handler = PythonHandler(formatter=python_logging_formatter)
self.activate_python_handler()
def format(self, record):
return self._current_handler.format(record)
def setFormatter(self, fmt):
self._current_handler.setFormatter(fmt)
def emit(self, record):
self._current_handler.emit(record)
def flush(self):
self._current_handler.flush()
def close(self):
super(ABSLHandler, self).close()
self._current_handler.close()
def handle(self, record):
rv = self.filter(record)
if rv:
return self._current_handler.handle(record)
return rv
@property
def python_handler(self):
return self._python_handler
def activate_python_handler(self):
"""Uses the Python logging handler as the current logging handler."""
self._current_handler = self._python_handler
def use_absl_log_file(self, program_name=None, log_dir=None):
self._current_handler.use_absl_log_file(program_name, log_dir)
def start_logging_to_file(self, program_name=None, log_dir=None):
self._current_handler.start_logging_to_file(program_name, log_dir)
class PythonFormatter(logging.Formatter):
"""Formatter class used by :class:`~absl.logging.PythonHandler`."""
def format(self, record):
"""Appends the message from the record to the results of the prefix.
Args:
record: logging.LogRecord, the record to be formatted.
Returns:
The formatted string representing the record.
"""
if (not FLAGS['showprefixforinfo'].value and
FLAGS['verbosity'].value == converter.ABSL_INFO and
record.levelno == logging.INFO and
_absl_handler.python_handler.stream == sys.stderr):
prefix = ''
else:
prefix = get_absl_log_prefix(record)
return prefix + super(PythonFormatter, self).format(record)
class ABSLLogger(logging.getLoggerClass()):
"""A logger that will create LogRecords while skipping some stack frames.
This class maintains an internal list of filenames and method names
for use when determining who called the currently executing stack
frame. Any method names from specific source files are skipped when
walking backwards through the stack.
Client code should use the register_frame_to_skip method to let the
ABSLLogger know which method from which file should be
excluded from the walk backwards through the stack.
"""
_frames_to_skip = set()
def findCaller(self, stack_info=False, stacklevel=1):
"""Finds the frame of the calling method on the stack.
This method skips any frames registered with the
ABSLLogger and any methods from this file, and whatever
method is currently being used to generate the prefix for the log
line. Then it returns the file name, line number, and method name
of the calling method. An optional fourth item may be returned,
callers who only need things from the first three are advised to
always slice or index the result rather than using direct unpacking
assignment.
Args:
stack_info: bool, when True, include the stack trace as a fourth item
returned. On Python 3 there are always four items returned - the
fourth will be None when this is False. On Python 2 the stdlib
base class API only returns three items. We do the same when this
new parameter is unspecified or False for compatibility.
Returns:
(filename, lineno, methodname[, sinfo]) of the calling method.
"""
f_to_skip = ABSLLogger._frames_to_skip
# Use sys._getframe(2) instead of logging.currentframe(), it's slightly
# faster because there is one less frame to traverse.
frame = sys._getframe(2) # pylint: disable=protected-access
while frame:
code = frame.f_code
if (_LOGGING_FILE_PREFIX not in code.co_filename and
(code.co_filename, code.co_name,
code.co_firstlineno) not in f_to_skip and
(code.co_filename, code.co_name) not in f_to_skip):
sinfo = None
if stack_info:
out = io.StringIO()
out.write(u'Stack (most recent call last):\n')
traceback.print_stack(frame, file=out)
sinfo = out.getvalue().rstrip(u'\n')
return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
frame = frame.f_back
def critical(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``CRITICAL``."""
self.log(logging.CRITICAL, msg, *args, **kwargs)
def fatal(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``FATAL``."""
self.log(logging.FATAL, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``ERROR``."""
self.log(logging.ERROR, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``WARN``."""
warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
DeprecationWarning, 2)
self.log(logging.WARN, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``WARNING``."""
self.log(logging.WARNING, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``INFO``."""
self.log(logging.INFO, msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
"""Logs ``msg % args`` with severity ``DEBUG``."""
self.log(logging.DEBUG, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""Logs a message at a cetain level substituting in the supplied arguments.
This method behaves differently in python and c++ modes.
Args:
level: int, the standard logging level at which to log the message.
msg: str, the text of the message to log.
*args: The arguments to substitute in the message.
**kwargs: The keyword arguments to substitute in the message.
"""
if level >= logging.FATAL:
# Add property to the LogRecord created by this logger.
# This will be used by the ABSLHandler to determine whether it should
# treat CRITICAL/FATAL logs as really FATAL.
extra = kwargs.setdefault('extra', {})
extra[_ABSL_LOG_FATAL] = True
super(ABSLLogger, self).log(level, msg, *args, **kwargs)
def handle(self, record):
"""Calls handlers without checking ``Logger.disabled``.
Non-root loggers are set to disabled after setup with :func:`logging.config`
if it's not explicitly specified. Historically, absl logging will not be
    disabled by that. To maintain this behavior, this function skips
checking the ``Logger.disabled`` bit.
This logger can still be disabled by adding a filter that filters out
everything.
Args:
record: logging.LogRecord, the record to handle.
"""
if self.filter(record):
self.callHandlers(record)
@classmethod
def register_frame_to_skip(cls, file_name, function_name, line_number=None):
"""Registers a function name to skip when walking the stack.
The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the
stack to make the log messages meaningful in their appropriate context.
This method registers a function from a particular file as one
which should be skipped.
Args:
file_name: str, the name of the file that contains the function.
function_name: str, the name of the function to skip.
line_number: int, if provided, only the function with this starting line
number will be skipped. Otherwise, all functions with the same name
in the file will be skipped.
"""
if line_number is not None:
cls._frames_to_skip.add((file_name, function_name, line_number))
else:
cls._frames_to_skip.add((file_name, function_name))
def _get_thread_id():
"""Gets id of current thread, suitable for logging as an unsigned quantity.
If pywrapbase is linked, returns GetTID() for the thread ID to be
consistent with C++ logging. Otherwise, returns the numeric thread id.
The quantities are made unsigned by masking with 2*sys.maxint + 1.
Returns:
Thread ID unique to this process (unsigned)
"""
thread_id = threading.get_ident()
return thread_id & _THREAD_ID_MASK
def get_absl_logger():
"""Returns the absl logger instance."""
return _absl_logger
def get_absl_handler():
"""Returns the absl handler instance."""
return _absl_handler
def use_python_logging(quiet=False):
"""Uses the python implementation of the logging code.
Args:
quiet: No logging message about switching logging type.
"""
get_absl_handler().activate_python_handler()
if not quiet:
info('Restoring pure python logging')
_attempted_to_remove_stderr_stream_handlers = False
def use_absl_handler():
"""Uses the ABSL logging handler for logging.
This method is called in :func:`app.run()<absl.app.run>` so the absl handler
is used in absl apps.
"""
global _attempted_to_remove_stderr_stream_handlers
if not _attempted_to_remove_stderr_stream_handlers:
# The absl handler logs to stderr by default. To prevent double logging to
# stderr, the following code tries its best to remove other handlers that
# emit to stderr. Those handlers are most commonly added when
# logging.info/debug is called before calling use_absl_handler().
handlers = [
h for h in logging.root.handlers
if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
for h in handlers:
logging.root.removeHandler(h)
_attempted_to_remove_stderr_stream_handlers = True
absl_handler = get_absl_handler()
if absl_handler not in logging.root.handlers:
logging.root.addHandler(absl_handler)
FLAGS['verbosity']._update_logging_levels() # pylint: disable=protected-access
FLAGS['logger_levels']._update_logger_levels() # pylint: disable=protected-access
def _initialize():
"""Initializes loggers and handlers."""
global _absl_logger, _absl_handler
if _absl_logger:
return
original_logger_class = logging.getLoggerClass()
logging.setLoggerClass(ABSLLogger)
_absl_logger = logging.getLogger('absl')
logging.setLoggerClass(original_logger_class)
python_logging_formatter = PythonFormatter()
_absl_handler = ABSLHandler(python_logging_formatter)
_initialize()
|
bazelbuild/bazel
|
third_party/py/abseil/absl/logging/__init__.py
|
__init__.py
|
py
| 38,729 |
python
|
en
|
code
| 21,632 |
github-code
|
6
|
38474237620
|
from trainer import image_classifier, augmentation_pipeline,GCSHelper
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(data_directory, output_directory, project_id,augment_flag,augment_samples,nr_epochs,drop_out,val_split,model,batch_size,check_overfit):
image_classifier.check_input(project_id=project_id, data_dir=data_directory, output_dir=output_directory,
validation_split=val_split, num_epochs=nr_epochs, dropout=drop_out,
augmentation_samples=augment_samples)
print('AUGMENTING IMAGES...')
if augment_flag:
augmentation_pipeline.augmentImages(project_id=project_id, data_dir=data_directory, sample_size=augment_samples,cloudML=True)
print('AUGMENTING IMAGES DONE!')
print('TRAINING MODEL...')
image_classifier.retrain(project_id, data_directory, batch_size=batch_size, model=model, dropout=drop_out, num_epochs=nr_epochs,
validation_split=val_split, output_dir=output_directory, cloud_mode=True,check_overfit=check_overfit)
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, help='directory of data')
parser.add_argument('--output_dir', type=str, help='directory of output model')
parser.add_argument('--project_id', type=str, default="trainer-cl", help='Google cloud projectID')
parser.add_argument('--aug_flag', type=str2bool, default=False, help='True if augmentation is done on images')
parser.add_argument('--aug_samples', type=int, default=1, help='extra augmentation samples that are added per category')
parser.add_argument('--nr_epochs', type=int, default=1, help='number of training epochs')
parser.add_argument('--drop_out', type=float, default=0.1, help='Amount of dropout to prevent overfitting')
parser.add_argument('--val_split', type=float, default=0.1, help='Percentage of data used for validation')
parser.add_argument('--model', type=str, default="MobileNet", help='Used model architecture')
parser.add_argument('--batch_size', type=int, default=16, help='Batch size used for model training')
parser.add_argument('--check_overfit', type=str2bool, default=True, help='Add early stopping check')
args = parser.parse_args()
try:
run(args.data_dir,args.output_dir,args.project_id,args.aug_flag,args.aug_samples,args.nr_epochs,args.drop_out,args.val_split,args.model,args.batch_size,args.check_overfit)
GCSHelper.uploadClosingStatusFilesToGCS(args.project_id,[],'done.txt',args.output_dir)
except Exception as e:
GCSHelper.uploadClosingStatusFilesToGCS(args.project_id,[str(e)], 'wrong.txt', args.output_dir)
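# Example (hypothetical) local invocation; bucket, project id and values are placeholders:
#   python -m trainer.task --data_dir=gs://my-bucket/data --output_dir=gs://my-bucket/output \
#       --project_id=my-gcp-project --aug_flag=true --aug_samples=5 --nr_epochs=10 --model=MobileNet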
|
chrike-platinum/Cloud_ML_Template
|
trainer/task.py
|
task.py
|
py
| 2,861 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19407988415
|
from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_jwt_extended import create_access_token, JWTManager
from flask_mysqldb import MySQL
from dotenv import load_dotenv
import os
from datetime import datetime
app = Flask(__name__)
load_dotenv()
app.config['MYSQL_HOST'] = os.environ.get('MYSQL_HOST')
app.config['MYSQL_USER'] = os.environ.get('MYSQL_USER')
app.config['MYSQL_PASSWORD'] = os.environ.get('MYSQL_PASSWORD')
app.config['MYSQL_DB'] = os.environ.get('MYSQL_DB')
mysql = MySQL(app)
CORS(app, resources={r"/*": {"origins": "*"}}, supports_credentials=True)
@app.route('/coords', methods=["GET"])
def get_coords():
try:
cursor = mysql.connection.cursor()
cursor.execute("SELECT latitude, longitude FROM sample")
results = cursor.fetchall()
cursor.close()
coords = [{'lat': row[0], 'lng': row[1]} for row in results]
return jsonify({'coords': coords})
except Exception as e:
return jsonify({'error': str(e)}), 500
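# Example (assumed) response shape for GET /coords:
#   {"coords": [{"lat": 25.6514, "lng": -100.2895}, ...]}
# Quick manual check (host/port are placeholders):
#   curl http://localhost:5000/coords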
|
RogelioBenavides/frida-kitchen
|
tracking_service/routes/tracking.py
|
tracking.py
|
py
| 1,020 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24059326549
|
from PIL import Image
from os import listdir, mkdir
def PrepareChars5x7(jmeno, mezX, mezY):
im = Image.open(jmeno)
Pixels = im.load()
for x in range(13):
for y in range(4):
imnew = Image.new(mode="RGB", size=(5, 7))
pole = imnew.load()
print(pole[1, 1], imnew.size)
for x2 in range(5):
for y2 in range(7):
pole[x2, y2] = Pixels[x2 + (5 + mezX) * x, y2 + (7 + mezY) * y]
imnew.save("Characters/ch" + str(x + 13 * y) + ".png")
def Roztrid():
seznam = listdir("Characters")
for polozka in seznam:
im = Image.open("Characters/" + polozka)
pixels = im.load()
hodnota = 0
for x in range(5):
for y in range(7):
if pixels[x,y][0] != 0:
hodnota += 1
if str(hodnota) not in listdir("Characters"):
mkdir("Characters/" + str(hodnota))
im.save("Characters/" + str(hodnota) + "//" + polozka)
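# Example (hypothetical) usage, assuming a sheet of 13x4 glyphs of 5x7 px separated by
# 1 px gaps and an existing "Characters" directory next to the script:
# PrepareChars5x7("font_sheet.png", 1, 1)
# Roztrid()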
|
MedOndrej/ASCIIart
|
Preparation.py
|
Preparation.py
|
py
| 1,019 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30478191100
|
# Merge k Sorted Linked Lists and return in form of Array
class Node:
    def __init__(self, data):
        self.data = data  # merge() below compares nodes via .data
        self.next = None
def merge(self, left, right):
if left is None:
return right
if right is None:
return left
ans = Node(-1)
temp = ans
while left and right:
if left.data <= right.data:
temp.next = left
temp = left
left = left.next
else:
temp.next = right
temp = right
right = right.next
while left:
temp.next = left
temp = left
left = left.next
while right:
temp.next = right
temp = right
right = right.next
return ans.next
def mergeKLists(self, arr ,k):
for i in range(0, k - 1):
temp = self.merge(arr[i], arr[i + 1])
arr[i + 1] = temp
return arr[k - 1]
# Runtime Error
# Optimal Solution using Priority Queue
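# Illustrative usage with k = 2 sorted lists, 1->4 and 2->3 (hypothetical driver code):
# a = Node(1); a.next = Node(4)
# b = Node(2); b.next = Node(3)
# head = Node(0).mergeKLists([a, b], 2)
# while head:
#     print(head.data, end=' ')   # prints: 1 2 3 4
#     head = head.next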
|
prabhat-gp/GFG
|
Linked List/Love Babbar/26_merge_k_sorted_ll.py
|
26_merge_k_sorted_ll.py
|
py
| 1,084 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14040284357
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Picture',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(null=True, upload_to=b'media/product_pictures', blank=True)),
('description', models.CharField(max_length=140, null=True, blank=True)),
('default_picture', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=140)),
('description', models.TextField(null=True, blank=True)),
('total_number_of_tickets', models.IntegerField()),
('tickets_sold', models.IntegerField()),
('end_time', models.DateTimeField()),
('start_time', models.DateTimeField()),
('pricing_per_ticket', models.DecimalField(max_digits=8, decimal_places=2)),
('winning_ticket_number', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ticket_number', models.IntegerField()),
('product', models.ForeignKey(related_name='tickets', to='ticketing.Product')),
('user', models.ForeignKey(related_name='tickets', to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='picture',
name='product',
field=models.ForeignKey(related_name='pictures', to='ticketing.Product'),
preserve_default=True,
),
]
|
yenbryan/raffle
|
ticketing/migrations/0001_initial.py
|
0001_initial.py
|
py
| 2,454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25072840538
|
#!/usr/bin/env python3
#_*_ coding: utf8 _*_
#------------------------------------------------------------
#----- GUILLOTINE -----|
# ---- FINDER HTTP SECURITY HEADERS ----|
# ---- Gohanckz ----|
# ---- Contact : [email protected] ----|
# ---- Version : 2.0 ----|
#------------------------------------------------------------
try:
from banner import banner
from prettytable import PrettyTable
import requests
import argparse
from urllib3.exceptions import InsecureRequestWarning
except ImportError as err:
print("Some libraries are missing:")
print(err)
parser = argparse.ArgumentParser(description="Finder Security Headers")
parser.add_argument("-t","--target",help="Show http security headers enabled and missing")
parser.add_argument("-v","--verbose",action="store_true",help="Show full response")
parser = parser.parse_args()
try:
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
url = requests.get(url=parser.target, verify=False)
security_headers = [
"Strict-Transport-Security",
"X-Frame-Options",
"X-Content-Type-Options",
"Content-Security-Policy",
"X-Permitted-Cross-Domain-Policies",
"Referrer-Policy",
"Clear-Site-Data",
"Cross-Origin-Embedder-Policy",
"Cross-Origin-Opener-Policy",
"Cross-Origin-Resource-Policy",
"Cache-Control"
]
info_headers = []
headers_site = []
security_headers_site = []
missing_headers = []
headers = dict(url.headers)
for i in headers:
headers_site.append(i)
for i in headers:
info_headers.append(headers[i])
for i in headers_site:
if i in security_headers:
security_headers_site.append(i)
for j in security_headers:
        if j not in headers_site:
missing_headers.append(j)
table = PrettyTable()
table.add_column("Header",headers_site)
table.add_column("Information",info_headers)
table.align="l"
while len(security_headers_site) < len(missing_headers):
security_headers_site.append(" ")
while len(security_headers_site) > len(missing_headers):
missing_headers.append(" ")
count = 0
for i in security_headers_site:
if i != " ":
count += 1
count_m = 0
for j in missing_headers:
if j != " ":
count_m +=1
s_table = PrettyTable()
s_table.add_column("Enabled Security Header",security_headers_site)
s_table.add_column("Missing Security Header",missing_headers)
s_table.align="l"
except:
print("[!] time out, unable to connect to site.")
def main():
banner()
try:
print("\n[*] Analyzing target : ",parser.target)
print("[*] Security headers enabled :", count)
print("[*] Missing Security Headers :",count_m)
except:
print("[!] Syntax Error.")
print("[+] Usage: python3 guillotine.py -t http://example.site")
def target():
try:
print(s_table)
except:
pass
def verbose():
try:
print(table)
except:
pass
if __name__ == '__main__':
main()
if parser.verbose:
verbose()
elif parser.target:
target()
|
Gohanckz/guillotine
|
guillotine.py
|
guillotine.py
|
py
| 3,431 |
python
|
en
|
code
| 12 |
github-code
|
6
|
32936869929
|
# Import the required libraries
import jieba
import docx
from docx import Document
from docx.shared import Inches
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
# Read the document content
filter_words = ['', '','','','','','','']
document = Document('221.docx')
text = ''
for paragraph in document.paragraphs:
    text += paragraph.text + ' '
# Segment with jieba and re-join with spaces so WordCloud can count the words
text = ' '.join(str(x) for x in jieba.cut(text))
for word in filter_words:
    text = text.replace(word, '')
# Create the stopword collection
stopwords = set(STOPWORDS)
stopwords = ['同志们', '二','三','四','五','一','六','七','八','九','十','']
# Create the word cloud object and set its parameters
wordcloud = WordCloud(
font_path="simhei.ttf",
width=1200, height=800,
background_color='white',
stopwords=stopwords,
min_font_size=10).generate(text)
# Plot the word cloud
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# Create the list of words to filter
# Load the text to be filtered
text = 'I hate this bad movie, it is so ugly and boring.'
# Use the string replace() function for substitution
print(text)
|
lingqingjiuying/9ying1
|
day1class1.py
|
day1class1.py
|
py
| 1,150 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39359053941
|
import time
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import service
#driver = webdriver.Chrome(executable_path="G:\driver\chromedriver_win32\chromedriver.exe") #my computer
driver=webdriver.Chrome()
driver.implicitly_wait(40)
driver.get("https://www.facebook.com/")
driver.find_element(By.XPATH,'//a[@role="button"and@data-testid="open-registration-form-button"]').click()
time.sleep(2)
driver.find_element(By.XPATH,'//input[@name="firstname"and@aria-label="First name"]').send_keys('pavithran')
time.sleep(2)
driver.find_element(By.XPATH,'//input[@name="lastname"and@aria-label="Surname"]').send_keys('sethu')
time.sleep(2)
driver.find_element(By.XPATH,'//input[@aria-label="Mobile number or email address"]').send_keys('9784561524')
driver.find_element(By.XPATH,'//input[@id="password_step_input"and@type="password"]').send_keys('Passcode')
time.sleep(2)
print("--------days-------")
days_elements=driver.find_element(By.ID,"day")#assign the id
days=Select(days_elements)#selecting the all elements
#giving the values manually to the dropdownlist
days.select_by_visible_text("17")#text method
time.sleep(2)
days.select_by_index(2)#index method
time.sleep(2)
days.select_by_value("6")#value method
time.sleep(2)
days_elements.send_keys("25")#send my value to days dropdown box NORMAL METHOD
print("get attribute method the value sent to the dropbox:",days_elements.get_attribute('value')) #get my value from dropbox
time.sleep(2)
totaloptions=len(days.options)#to find total options available in days
print("Total options in day dropdownlist:",totaloptions)#31 options are there
opsd=days.options#to get all options
print("total options")#just for heading
for option in opsd:#for loop
print("option text is-{}-option value is={}".format(option.text,option.get_attribute("value")))
print("--using range--")
for x in range(0,30):
print(opsd[x].text)
print("--days after 20th\n--")
for x in opsd:
y=x.get_attribute("value")
z=int(y)
if z>=20:
print(x.text)
print("--days between 10 to 25\n--")
for x in opsd:
y=x.get_attribute("value")
z=int(y)
if z>=10 and z<=25:
print(x.text)
print('-----month-----')
#month
month_element=driver.find_element(By.ID,'month')
months=Select(month_element)
months.select_by_value("2")#feb
time.sleep(2)
months.select_by_index(4)
time.sleep(2)
months.select_by_visible_text("Aug")
month_length=len(months.options)
print("total months options are available in facebook\n:",month_length)
ops=months.options
for option in ops:
print("option text is-{}-option value is={}".format(option.text, option.get_attribute("value")))
#using range printing text
print("--using range--")
for x in range(0,12):
print(ops[x].text)
print("----last 3 months---\n")
for x in ops:
y=(x.get_attribute('value'))
z=int(y)
if z>=10:
print(x.text)
print("----between months:----\n")
for x in ops:
y=(x.get_attribute('value'))
z=int(y)
if z>=2 and z<=10:
print(x.text)
print("---1st 3 months\n---")
for x in ops:
y=(x.get_attribute('value'))
z=int(y)
if z<=3:
print(x.text)
print("-------year--------")
year_elements=driver.find_element(By.ID,"year")
years=Select(year_elements)
years.select_by_visible_text("1997")
time.sleep(3)
years.select_by_value("1996")
time.sleep(3)
years.select_by_index(1)#2021
totalyears=len(years.options)
print("total no of options in year:",totalyears)#118
opsy=years.options
for x in opsy:
print("year is={} year value is={}".format(x.text,x.get_attribute("value")))
print("--using range--")
for x in range(0,30):
print(opsy[x].text)
print("--years above 1997\n--")
for x in opsy:
y=x.get_attribute("value")
z=int(y)
if z>=1997:
print(x.text)
print("--years between 2000 to 1990\n--")
for x in opsy:
y=x.get_attribute("value")
z=int(y)
if z<=2000 and z>=1990:
print(x.text)
print(type(y))
print(type(z))
#gender selection
gender_f=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[1]').click()
status=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[1]').is_selected()
print(status)
time.sleep(3)
gender_m=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[2]').click()
status=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[2]').is_selected()
print(status)
time.sleep(3)
gender_c=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[3]').click()
status=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[3]').is_selected()
print(status)
custom=driver.find_element(By.XPATH,'//select[@aria-label="Select your pronoun"]')
custom_s=Select(custom)
custom_s.select_by_value("1")
time.sleep(2)
custom_s.select_by_value("2")
time.sleep(2)
customs=custom_s.options
for x in customs:
print(x.text)
driver.find_element(By.XPATH,'//input[@name="custom_gender"]').send_keys("they")
driver.find_element(By.XPATH,'//button[text()="Sign Up"]').click()
time.sleep(5)
driver.close()
|
Paviterence/Selenium-Python-BasicCodes
|
fb_select_method.py
|
fb_select_method.py
|
py
| 5,232 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14974723036
|
import torch
import torch.nn as nn
from torch_geometric.loader import DataLoader
from torch_geometric.data import Data, Batch
from torch.utils.data import Dataset
import torch_geometric.transforms as T
from torch_geometric.nn import GATConv
import torch.nn.functional as F
class GATNet(torch.nn.Module):
def __init__(self, num_graph_node_features, num_boundary_node_features):
super(GATNet, self).__init__()
self.graph_conv1 = GATConv(num_graph_node_features, 32, heads=4)
input_of_conv2 = num_graph_node_features + 32*4
self.graph_conv2 = GATConv(input_of_conv2, 32, heads=8)
input_of_conv3 = num_graph_node_features + 32*8
self.graph_conv3 = GATConv(input_of_conv3, 64, heads=8)
input_of_conv4 = num_graph_node_features + 64*8
self.graph_conv4 = GATConv(input_of_conv4, 128, heads=8)
shape_of_graphs_befor_concatination = num_graph_node_features + 128*8
self.boundary_conv1 = GATConv(num_boundary_node_features, 32, heads=4)
input_of_boundary_conv2 = 32*4 + num_boundary_node_features
self.boundary_conv2 = GATConv(input_of_boundary_conv2, 32, heads=8)
shape_of_boundary_befor_concatination = num_boundary_node_features + 32 * 8
        # Output of graph_conv4 (plus its skip connection from the raw graph nodes) concatenated with the pooled output of boundary_conv2 (plus its skip connection from the raw boundary nodes)
inputs_concatination = shape_of_graphs_befor_concatination + shape_of_boundary_befor_concatination
self.Concatination1 = GATConv(inputs_concatination, 128, heads=8)
self.width_layer1 = nn.Linear(128*8, 128)
self.height_layer1 = nn.Linear(128*8, 128)
self.width_output = nn.Linear(128, 1)
self.height_output = nn.Linear(128, 1)
self.dropout = torch.nn.Dropout(0.2)
def forward(self, graph, boundary):
x_graph, g_edge_index, g_edge_attr, g_batch = graph.x.to(torch.float32), graph.edge_index, graph.edge_attr, graph.batch
x_boundary, b_edge_indexy, b_edge_attr, b_batch = boundary.x.to(torch.float32), boundary.edge_index, boundary.edge_attr, boundary.batch
NUM_OF_NODES = x_graph.shape[0]
        # During testing a single graph may be passed without a DataLoader, so batch can be None.
        if g_batch is None:
            g_batch = torch.zeros(x_graph.shape[0], dtype=torch.long)
        if b_batch is None:
            b_batch = torch.zeros(x_boundary.shape[0], dtype=torch.long)
x_graph_res = x_graph
x_boundary_res = x_boundary
        # Pass the graph through the message-passing layers to embed its features
        x_graph = F.leaky_relu(self.graph_conv1(x_graph, g_edge_index, g_edge_attr))
        x_graph = self.dropout(x_graph) # concatenated below with the skip connection from the raw node features
x_graph = torch.cat([x_graph, x_graph_res], dim=1)
x_graph = F.leaky_relu(self.graph_conv2(x_graph, g_edge_index, g_edge_attr))
x_graph = self.dropout(x_graph)
x_graph = torch.cat([x_graph, x_graph_res], dim=1)
x_graph = F.leaky_relu(self.graph_conv3(x_graph, g_edge_index))
x_graph = self.dropout(x_graph)
x_graph = torch.cat([x_graph, x_graph_res], dim=1)
x_graph = F.leaky_relu(self.graph_conv4(x_graph, g_edge_index))
x_graph = self.dropout(x_graph)
x_graph = torch.cat([x_graph, x_graph_res], dim=1)
        # Pass the boundary through the message-passing layers to embed its features
x_boundary = F.leaky_relu(self.boundary_conv1(x_boundary, b_edge_indexy, b_edge_attr))
x_boundary = self.dropout(x_boundary)
x_boundary = torch.cat([x_boundary, x_boundary_res], dim=1)
x_boundary = F.leaky_relu(self.boundary_conv2(x_boundary, b_edge_indexy, b_edge_attr))
x_boundary = self.dropout(x_boundary)
x_boundary = torch.cat([x_boundary, x_boundary_res], dim=1)
        # Pool the boundary to a 1-D vector by taking the max of each feature across all nodes.
x_boundary_pooled = F.max_pool1d(x_boundary.transpose(0, 1), kernel_size=x_boundary.shape[0]).view(1, -1)
        # Concatenate the graph embeddings with the (repeated) pooled boundary vector
x = torch.cat([x_graph, x_boundary_pooled.repeat(NUM_OF_NODES, 1)], dim=1)
x = F.leaky_relu(self.Concatination1(x, g_edge_index))
x = self.dropout(x)
width = F.leaky_relu(self.width_layer1(x))
width = self.dropout(width)
width = self.width_output(width)
height = F.leaky_relu(self.height_layer1(x))
height = self.dropout(height)
height = self.height_output(height)
return width.squeeze(), height.squeeze()
def load_model(checkpoint_path, device):
model = GATNet(9, 3)
model = model.to(device)
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
return model
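# Inference sketch (assumption: 'checkpoint.pth' is a hypothetical checkpoint path and
# graph_data / boundary_data are torch_geometric Data objects with 9 and 3 node features,
# matching the sizes hard-coded in load_model):
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   model = load_model('checkpoint.pth', device)
#   with torch.no_grad():
#       width, height = model(graph_data.to(device), boundary_data.to(device))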
|
mo7amed7assan1911/Floor_Plan_Generation_using_GNNs
|
model.py
|
model.py
|
py
| 5,015 |
python
|
en
|
code
| 3 |
github-code
|
6
|
30513354824
|
import os
import requests
from app import Processing
import nltk
from moviepy.editor import *
from pexels_api import API
from pathlib import Path
import time
import pyttsx3
# configurations of paths, output URL, file structure
# 16:9 ratios possible for upright smartphone usage
# 1080, 1920 --> FullHD resolution
# 540, 960 --> 1/4 data size compared to FullHD
# 270, 480 --> 1/8 data size compared to FullHD
WIDTH_OUT = 540/2
HEIGHT_OUT = 960/2
screensize = (WIDTH_OUT, HEIGHT_OUT)
FONT = "Helvetica-Bold"
FONTSIZE_MAIN = WIDTH_OUT * 0.1
FONTSIZE_SUB = WIDTH_OUT * 0.03
FONT_COLOUR = "white"
PADDING = WIDTH_OUT * 0.1
readingSpeed = 0.2
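# Timing note: each text segment is later shown for roughly len(segment) * readingSpeed seconds
# (clamped to a 5 second minimum in nlp_testing_2), e.g. a 60-character sentence -> ~12 s.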
audio_dir_emotional = "static/music/emotional.mp3"
audio_dir_promo = "static/music/promo.mp3"
audio_dir_neutral = "static/music/neutral.mp3"
audio_emotional = AudioFileClip(audio_dir_emotional, fps=44100)
audio_neutral = AudioFileClip(audio_dir_neutral, fps=44100)
audio_promo = AudioFileClip(audio_dir_promo, fps=44100)
ABS_PATH = os.path.abspath(__file__) # "/app.py"
BASE_DIR = os.path.dirname(ABS_PATH) # "/"
Path(os.path.join(BASE_DIR, "downloads")).mkdir(parents=True, exist_ok=True)
OUTPUT = os.path.join(BASE_DIR, "downloads")
# API setups
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
api = API(PEXELS_API_KEY)
def dl_img(url, filename):
print(filename)
r = requests.get(url, allow_redirects=True)
open(filename, 'wb').write(r.content)
return filename
def pexels_fetch(to_download):
downloaded_files = []
n = 0
for i in to_download:
api.search(" ".join(i), page=1, results_per_page=1)
dl = api.get_entries()
print(dl)
img = [
dl_img(dl[0].large, os.path.join(OUTPUT, str("image_downloaded_" + str(n) + ".jpg"))),
dl[0].photographer
]
downloaded_files.append(img)
n += 1
return downloaded_files
def zoom(file, t):
f = (ImageClip(file)
.resize(height=screensize[1])
.resize(lambda t: 1 + 0.02 * t)
.set_position(('center', 'center'))
.set_duration(t)
)
    f = resize_to_output_size(f)
# cvc = ImageClip(f, t)
return f
def resize_to_output_size(f):
if f.w < WIDTH_OUT:
f = f.resize(width=WIDTH_OUT)
if f.h < HEIGHT_OUT:
f = f.resize(height=HEIGHT_OUT)
f = f.crop(x_center=f.w / 2, y_center=f.h / 2, width=WIDTH_OUT, height=HEIGHT_OUT)
return f
'''
# voiceover functionality deprecated due to non-existent espeak support on heroku
def voiceover(textSnippet, i):
engine = pyttsx3.init()
print(f"inside voiceover func, processing: {textSnippet} \nIsBusy is set to {engine.isBusy()}")
audioFileName = f"voiceover text segment no. {i}.mp3"
engine.save_to_file(textSnippet, audioFileName)
engine.runAndWait()
# engine.stop()
print(f"text to speech worked correctly? \nisBusy is set to {engine.isBusy()}")
return audioFileName
'''
def overlay_text(file, i):
overlay = TextClip(file.text_segmented[i],
size=(WIDTH_OUT * 0.9, HEIGHT_OUT),
color=FONT_COLOUR,
method="caption",
align="East",
fontsize=FONTSIZE_MAIN,
font=FONT
)
combined = CompositeVideoClip([overlay, overlay_attribution(file.downloaded_items[i][1])])
# voiceover functionality deprecated
# if file.voiceover == True or file.voiceover == "true" or file.voiceover == "True":
# audio_clip_temp = AudioFileClip(voiceover(file.text_segmented[i], i), fps=44100)
# combined = combined.set_audio(audio_clip_temp)
combined = combined.set_duration(file.text_timing[i])
return combined
def overlay_attribution(text):
attribution = TextClip(f"Image from www.pexels.com by: {text}",
size=(WIDTH_OUT, HEIGHT_OUT * 0.95),
color=FONT_COLOUR,
fontsize=FONTSIZE_SUB,
align="south",
method="caption",
font=FONT
)
attribution = attribution.set_position((0, 0.97), relative=True)
return attribution
def create_kopfkino(content):
file = Processing(user_input=content.get("user_input"), style=content.get("style"), voiceover=content.get("voiceover"))
print(f"voiceover from content JSON is set to: {file.voiceover}")
nlp_testing_2(file)
print(file.downloaded_items)
print(file.text_searchwords)
file.downloaded_items = pexels_fetch(file.text_searchwords)
for i in range(0, len(file.downloaded_items)):
file.footage.append(zoom(file.downloaded_items[i][0], file.text_timing[i]))
for i in range(0, len(file.text_segmented)):
clip = overlay_text(file, i)
combined = CompositeVideoClip([file.footage[i], clip])
file.footage_and_text.append(combined)
file.export_file = concatenate(file.footage_and_text)
if file.style == "neutral":
file.export_file = file.export_file.set_audio(audio_neutral.set_duration(file.export_file.duration))
elif file.style == "emotional":
file.export_file = file.export_file.set_audio(audio_emotional.set_duration(file.export_file.duration))
elif file.style == "promo":
file.export_file = file.export_file.set_audio(audio_promo.set_duration(file.export_file.duration))
else:
file.export_file = file.export_file.set_audio(audio_neutral.set_duration(file.export_file.duration))
file.export_file.write_videofile(os.path.join(OUTPUT, f"Kopfkino_export_in workerinstance.mp4"), codec='libx264',
audio_codec='aac', fps=24)
with open(os.path.join(OUTPUT, f"Kopfkino_export_in workerinstance.mp4"), "rb") as trans:
result = trans.read()
return result
def nlp_testing_2(file):
text_raw = file.user_input
print(text_raw)
file.text_segmented = nltk.sent_tokenize(text_raw)
for i in range(0, len(file.text_segmented)):
n = 0
for c in file.text_segmented[i]:
n += 1
n = round(n * readingSpeed, 1)
if n < 5:
n = 5
file.text_timing.append(n)
text_segmented_to_words = nltk.word_tokenize(file.text_segmented[i])
file.text_searchwords.append([])
print(f"POS Tags{nltk.pos_tag(text_segmented_to_words)}")
for p in nltk.pos_tag(text_segmented_to_words):
if p[1] in {"JJ", "NN", "NNS", "VB"}:
print(f"found word {p} and put it to the searchwords")
file.text_searchwords[i].append(p[0])
for x in file.text_searchwords:
if len(x) == 0:
x.append("error")
print("-------> ERROR HANDLING NEEDED: No searchword left: appended full sentence OR error")
return f"\nsegmented: {file.text_segmented}, \ntimings: {file.text_timing} \nsearchwords: {file.text_searchwords}"
|
oliverkoetter/kopfkino
|
tasks.py
|
tasks.py
|
py
| 6,989 |
python
|
en
|
code
| 2 |
github-code
|
6
|
16536913637
|
import pandas as pd
dataset = pd.read_csv('iris.csv')
data = dataset.iloc[ : 99 , :]
target = data.iloc[ : , -1: ]
y = []
for x in target.values:
if x == 'Iris-setosa':
y.append(1)
else:
y.append(0)
x = data.iloc[ : , : -1]
x = x.values.tolist()
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import numpy as np
x, y = shuffle(x, y)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(x_train,y_train)
y_pred = clf.predict(x_test)
print(accuracy_score(y_test,y_pred))
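# Note (equivalent sketch): the label-encoding loop above can also be written without a loop, e.g.
#   y = (target.values.ravel() == 'Iris-setosa').astype(int).tolist()
# which produces the same 0/1 labels for this two-class subset.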
|
Nuhru1/Machine_Learning_Logistic_Regression_From_Scratch
|
Logistic_Regression_with_Sklearn.py
|
Logistic_Regression_with_Sklearn.py
|
py
| 897 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35041146572
|
import copy
import json
import logging
import os
from threading import Thread
import requests
import six
import yaml
from toscaparser.tosca_template import ToscaTemplate
from yaml import Loader
from configuration_tool.common.tosca_reserved_keys import IMPORTS, DEFAULT_ARTIFACTS_DIRECTORY, \
EXECUTOR, NAME, TOSCA_ELEMENTS_MAP_FILE, TOSCA_ELEMENTS_DEFINITION_FILE, TOPOLOGY_TEMPLATE, TYPE, \
TOSCA_ELEMENTS_DEFINITION_DB_CLUSTER_NAME, NODE_TEMPLATES, RELATIONSHIP_TEMPLATES
from configuration_tool.common import utils
from configuration_tool.common.configuration import Configuration
from configuration_tool.configuration_tools.ansible.instance_model.instance_model import update_instance_model
from configuration_tool.configuration_tools.combined.combine_configuration_tools import get_configuration_tool_class
from configuration_tool.providers.common.provider_configuration import ProviderConfiguration
from configuration_tool.providers.common.tosca_template import ProviderToscaTemplate
REQUIRED_CONFIGURATION_PARAMS = (TOSCA_ELEMENTS_DEFINITION_FILE, DEFAULT_ARTIFACTS_DIRECTORY, TOSCA_ELEMENTS_MAP_FILE)
def load_to_db(node_templates, relationship_templates, config, database_api_endpoint, template, cluster_name):
definitions = {}
all_templates = node_templates
all_templates = utils.deep_update_dict(all_templates, relationship_templates)
def_cluster = config.get_section(config.MAIN_SECTION).get(TOSCA_ELEMENTS_DEFINITION_DB_CLUSTER_NAME)
for key, value in all_templates.items():
type = value[TYPE]
r = requests.get(utils.get_url_for_getting_dependencies(def_cluster, database_api_endpoint, type))
try:
response = r.json()
except Exception:
raise Exception("Failed to parse json response from db")
if response['status'] != 200:
raise Exception("Error in db! Status code: %s, msg: %s" % (response['status'], response['message']))
definitions = utils.deep_update_dict(definitions, response['result'])
with open(os.path.join(utils.get_tmp_clouni_dir(), 'template.yaml'), "w") as f:
template = utils.deep_update_dict(template, definitions)
del template[IMPORTS]
print(yaml.dump(template, Dumper=utils.NoAliasDumper), file=f)
with open(os.path.join(utils.get_tmp_clouni_dir(), 'template.yaml'), "r") as f:
files = {'file': f}
res = requests.post(utils.get_url_for_loading_to_db(cluster_name, database_api_endpoint), files=files)
try:
response = res.json()
except Exception:
raise Exception("Failed to parse json response from db on loading template")
if response['status'] != 200:
raise Exception("Error in db! Status code: %s, msg: %s" % (response['status'], response['message']))
def translate(provider_template, validate_only, configuration_tool, cluster_name, is_delete=False,
extra=None, log_level='info', debug=False, host_ip_parameter='public_address',
database_api_endpoint=None, grpc_cotea_endpoint=None):
log_map = dict(
debug=logging.DEBUG,
info=logging.INFO,
warning=logging.WARNING,
error=logging.ERROR,
        critical=logging.CRITICAL
)
logging_format = "%(asctime)s %(levelname)s %(message)s"
logging.basicConfig(filename='.clouni-configuration-tool.log', filemode='a', level=log_map[log_level],
format=logging_format, datefmt='%Y-%m-%d %H:%M:%S')
config = Configuration()
template = yaml.load(provider_template, Loader=Loader)
topology_template = template.get(TOPOLOGY_TEMPLATE)
    # Temporary approach: the provider is taken from the first matching node template (different providers cannot be mixed in one template)
provider = None
for key in topology_template.get('node_templates').keys():
provider_template_name = key
tosca_type = topology_template.get('node_templates').get(provider_template_name).get('type')
(provider, _, _) = utils.tosca_type_parse(tosca_type)
        if provider in ['openstack', 'amazon', 'kubernetes']: # TODO: make config providers file!
break
provider_config = ProviderConfiguration(provider)
for sec in REQUIRED_CONFIGURATION_PARAMS:
if sec not in config.get_section(config.MAIN_SECTION).keys():
logging.error('Provider configuration parameter "%s" is missing in configuration file' % sec)
raise Exception('Provider configuration parameter "%s" is missing in configuration file' % sec)
def_files = config.get_section(config.MAIN_SECTION).get(TOSCA_ELEMENTS_DEFINITION_FILE)
if isinstance(def_files, six.string_types):
def_files = [def_files]
provider_def_files = provider_config.get_section(config.MAIN_SECTION).get(TOSCA_ELEMENTS_DEFINITION_FILE)
if isinstance(provider_def_files, six.string_types):
provider_def_files = [provider_def_files]
default_import_files = []
for def_file in def_files:
default_import_files.append(os.path.join(utils.get_project_root_path(), def_file))
for def_file in provider_def_files:
default_import_files.append(os.path.join(utils.get_project_root_path(), 'configuration_tool', 'providers',
provider, def_file))
logging.info("Default TOSCA template definition file to be imported \'%s\'" % json.dumps(default_import_files))
# Add default import of normative TOSCA types to the template
template[IMPORTS] = template.get(IMPORTS, [])
for i in range(len(template[IMPORTS])):
if isinstance(template[IMPORTS][i], dict):
for import_key, import_value in template[IMPORTS][i].items():
if isinstance(import_value, six.string_types):
template[IMPORTS][i] = import_value
elif isinstance(import_value, dict):
if import_value.get('file', None) is None:
logging.error("Imports %s doesn't contain \'file\' key" % import_key)
raise Exception("Imports %s doesn't contain \'file\' key" % import_key)
else:
template[IMPORTS][i] = import_value['file']
if import_value.get('repository', None) is not None:
logging.warning("Clouni doesn't support imports \'repository\'")
template[IMPORTS].extend(default_import_files)
for i in range(len(template[IMPORTS])):
template[IMPORTS][i] = os.path.abspath(template[IMPORTS][i])
if template.get(TOPOLOGY_TEMPLATE):
tmpl = template.get(TOPOLOGY_TEMPLATE)
if database_api_endpoint:
if not tmpl.get(NODE_TEMPLATES):
tmpl[NODE_TEMPLATES] = {}
if not tmpl.get(RELATIONSHIP_TEMPLATES):
tmpl[RELATIONSHIP_TEMPLATES] = {}
load_to_db(tmpl[NODE_TEMPLATES], tmpl[RELATIONSHIP_TEMPLATES], config, database_api_endpoint, template, cluster_name)
else:
if tmpl.get(NODE_TEMPLATES):
node_templates = tmpl.get(NODE_TEMPLATES)
for elem in node_templates:
update_instance_model(cluster_name, node_templates[elem], node_templates[elem][TYPE], elem, [], [], is_delete, init=True)
if tmpl.get(RELATIONSHIP_TEMPLATES):
rel_templates = tmpl.get(RELATIONSHIP_TEMPLATES)
for elem in rel_templates:
update_instance_model(cluster_name, rel_templates[elem], rel_templates[elem][TYPE], elem, [], [], is_delete, init=True)
copy_of_template = copy.deepcopy(template)
try:
tosca_parser_template_object = ToscaTemplate(yaml_dict_tpl=copy_of_template)
except Exception as e:
logging.exception("Got exception from OpenStack tosca-parser: %s" % e)
raise Exception("Got exception from OpenStack tosca-parser: %s" % e)
# After validation, all templates are imported
if validate_only:
msg = 'The input "%(template_file)s" successfully passed validation. \n' \
% {'template_file': 'TOSCA template'}
return msg
tosca = ProviderToscaTemplate(template, provider, configuration_tool, cluster_name,
host_ip_parameter, is_delete, grpc_cotea_endpoint)
tool = get_configuration_tool_class(configuration_tool)(provider)
default_artifacts_directory = config.get_section(config.MAIN_SECTION).get(DEFAULT_ARTIFACTS_DIRECTORY)
configuration_content = tool.to_dsl(provider, tosca.provider_operations, tosca.reversed_provider_operations,
tosca.cluster_name, is_delete, target_directory=default_artifacts_directory,
extra=extra, debug=debug,
grpc_cotea_endpoint=grpc_cotea_endpoint)
return configuration_content
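# Usage sketch (assumption: 'template.yaml' is a TOSCA template on disk and 'ansible' is a
# configuration tool name supported by get_configuration_tool_class in this repository):
#
#   with open('template.yaml') as f:
#       content = translate(f.read(), validate_only=False, configuration_tool='ansible',
#                           cluster_name='test-cluster')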
|
sadimer/clouni_configuration_tool
|
configuration_tool/common/translator_to_configuration_dsl.py
|
translator_to_configuration_dsl.py
|
py
| 9,016 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37220130073
|
# 46__method_chaining
class Car:
def turn_on(self):
print("You started the engine")
turned_on = "turned on"
return self
def turn_off(self):
print("You turned of the engine")
return self
def brake(self):
print("You stepped on the brake")
return self
def drive(self):
print("You drive the car")
print("You have previously")
return self
car = Car()
# Previously, this took two lines:
# car.turn_on()
# car.drive()
car.turn_on().drive()
print("-------")
car.brake().turn_off()
print("---------")
car.turn_on().drive().brake().turn_off()
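# Each method returns self, so the value of car.turn_on() is the same Car instance and .drive()
# can be called directly on the result - that is what makes the chained calls above work.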
|
GGisMee/Python
|
pfc/tutorials/46__method_chaining.py
|
46__method_chaining.py
|
py
| 627 |
python
|
en
|
code
| 3 |
github-code
|
6
|
69928685308
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Vehicles(models.Model):
class Meta:
ordering = [ 'year']
id = models.AutoField(
primary_key = True
)
year_min = 1900
year_max = 2100
    year = models.IntegerField(
        'Year',
        validators = [MinValueValidator(year_min), MaxValueValidator(year_max)],
    )
man_max_len = 50
manufacturer = models.CharField(
'Manufacturer',
max_length = man_max_len,
)
model_max_len = 100
model = models.CharField(
'model',
max_length = model_max_len
)
sn_max_len = 15
serial_no = models.CharField(
'Serial Number',
unique = True,
max_length = sn_max_len
)
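# Usage sketch (assumption: executed in a Django shell with migrations applied for this app):
#
#   Vehicles.objects.create(year=2020, manufacturer="Toyota", model="Corolla",
#                           serial_no="SN0000000000001")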
|
babarehner/carwork
|
carrepairs/models.py
|
models.py
|
py
| 640 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15996890764
|
from django.urls import path
from . import views
urlpatterns = [
path('api/songs', views.SongsView.as_view(), name='songs'),
path('api/songs/<int:song_id>', views.SongInfoView.as_view(), name='song_info'),
path('api/songs/search/', views.SongSearchView.as_view(), name='song_search'),
path('api/artists', views.ArtistsView.as_view(), name='artists'),
path('api/artists/<int:artist_id>', views.ArtistInfoView.as_view(), name='artist_info'),
path('api/albums', views.AlbumsView.as_view(), name='albums'),
path('api/albums/<int:album_id>', views.AlbumInfoView.as_view(), name='album_info'),
]
|
artooff/2023-MAI-Backend-A-Artov
|
lab3/musicProject/musicService/urls.py
|
urls.py
|
py
| 652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37030043869
|
import PySimpleGUI as sg
import numpy as np
import cv2
import matplotlib.pyplot as plt
from Baysian_Mat import Bayesian_Matte
from PIL import Image, ImageOps
import time # Execution TIme imports
import psutil
from laplac import Laplacianmatting
from compositing import compositing
from QualityTest import mse2d
from QualityTest import sad2d
from QualityTest import psnr2d
from smooth import smooth
# Import your Bayesian_Matte, Laplacianmatting, compositing, mse2d, sad2d, and psnr2d functions here
# Define the PySimpleGUI layout
layout = [
[sg.Text("Select image file")],
[sg.Input(key="-IMAGE_FILE-"), sg.FileBrowse()],
[sg.Text("Select trimap file")],
[sg.Input(key="-TRIMAP_FILE-"), sg.FileBrowse()],
[sg.Text("Select GT file")],
[sg.Input(key="-GT_FILE-"), sg.FileBrowse()],
[sg.Button("Submit")],
[sg.Output(size=(60, 2))]
]
# Create the PySimpleGUI window
window = sg.Window("Alpha Matte Calculation", layout)
# Start time for computing the execution time
st = time.time()
# Get initial memory usage
Memstart = psutil.Process().memory_info().rss / (1024 ** 2)
# Event loop
while True:
event, values = window.read()
if event == sg.WINDOW_CLOSED:
break
if event == "Submit":
# Get the file paths from the input fields
image_path = values["-IMAGE_FILE-"]
trimap_path = values["-TRIMAP_FILE-"]
gt_path = values["-GT_FILE-"]
# Read the image, trimap, and GT files
image = np.array(Image.open(image_path))
image_trimap = np.array(Image.open(trimap_path))
GT = np.array(Image.open(gt_path))
# Step 2 : Calculating Bayesian Matte for the given trimap
alpha, pixel_count = Bayesian_Matte(image, image_trimap)
        # Step 3 : Scale alpha back to the 0-255 range for display purposes
alpha_disp = alpha * 255
alpha_int8 = np.array(alpha, dtype=int)
et = time.time()
elapsed_time = et - st
# Step 4 : End to End testing - 1 : Calculating the Laplacian Matting
Lalpha = Laplacianmatting(image, image_trimap)
# Step 5 : Compositing Function Display
background = np.array(Image.open(
'C:/Users/aduttagu/Desktop/Main/Bayesian-Matting-Implementation/bayesian-Matting-Python/background.png'))
comp_Bay = compositing(image, alpha_disp, background)
        # Step 6 : Smoothing the alpha matte
smooth_alpha = smooth(alpha_disp)
        # Step 7 : Displaying the Bayesian, Laplacian, ground-truth and smoothed alpha mattes.
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].imshow(alpha_disp, cmap='gray')
axes[0, 0].set_title('Bayesian - Alpha Matte')
axes[0, 1].imshow(Lalpha, cmap='gray')
axes[0, 1].set_title('Laplacian - Alpha Matte')
axes[1, 0].imshow(GT, cmap='gray')
axes[1, 0].set_title('Ground Truth')
axes[1, 1].imshow(smooth_alpha, cmap='gray')
axes[1, 1].set_title('Smoothed Alpha')
plt.show()
plt.imshow(comp_Bay)
plt.show()
# Close the PySimpleGUI window
window.close()
# Part of End to End testing - 1 : Performance comparison between Laplacian and Bayesian matting.
Bay_MSE = mse2d(alpha_disp, GT)
Lap_MSE = mse2d(Lalpha, GT)
print("The MSE between the Ground Truth and Bayesian Alpha Matte is :", Bay_MSE)
print("The MSE between the Ground Truth and Laplacian Alpha Matte is :", Lap_MSE)
Bay_SAD = sad2d(alpha_disp, GT)
Lap_SAD = sad2d(Lalpha, GT)
print("The SAD between the Ground Truth and Bayesian Alpha Matte is :", Bay_SAD)
print("The SAD between the Ground Truth and Laplacian Alpha Matte is :", Lap_SAD)
Bay_PSNR = psnr2d(alpha_disp, GT)
Lap_PSNR = psnr2d(Lalpha, GT)
print("The PSNR between the Ground Truth and Bayesian Alpha Matte is :", Bay_PSNR)
print("The PSNR between the Ground Truth and Laplacian Alpha Matte is :", Lap_PSNR)
print('Execution time for Bayesian Matting: {:.3f} seconds'.format(
elapsed_time))
# get usage after completion of code
Memend = psutil.Process().memory_info().rss / (1024 ** 2)
Memuse = Memend - Memstart
print("Total memory consumed in execution of this program : ", Memuse, "MB's")
|
ADG4050/Bayesian-Matting-Implementation
|
bayesian-Matting-Python/UI.py
|
UI.py
|
py
| 4,145 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72530296187
|
import os
import cv2
import pytesseract
import numpy as np
from tqdm import tqdm
INPUT_PATH: str = "inputs_control/"
OUTPUT_PATH: str = "text_pred_control/"
#CONFIG: str = "--psm 6 --oem 1"
CONFIG: str = "--psm 7 --oem 1"
def pipeline(file: str) -> str:
    path: str = f"{INPUT_PATH}{file}"
    img: np.ndarray = cv2.imread(path)
    if img is None:
        # cv2.imread returns None for unreadable or non-image files; skip them
        return ""
    text: str = pytesseract.image_to_string(img, config=CONFIG)
    iterator: str = file.split(".")[0]
    with open(OUTPUT_PATH + f"{iterator}.txt", 'w') as f:
        f.write(text)
    return text
def main() -> int:
    os.makedirs(OUTPUT_PATH, exist_ok=True)  # ensure the output directory exists
    files = os.listdir(INPUT_PATH)
    for file in tqdm(files):
        pipeline(file)
    return 0
if __name__ == "__main__":
main()
|
lukeabela38/image2text-tesseract
|
workspace/main.py
|
main.py
|
py
| 693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28811405161
|
import torch
import csv
import pytorch_lightning as pl
from sys import platform
if platform == "linux":
from pypesq import pesq
from pystoi import stoi
from math import isnan
from numpy import random
def check_inf_neginf_nan(tensor, error_msg):
assert not torch.any(torch.isinf(tensor)), error_msg
if tensor.dtype == torch.complex32 or tensor.dtype == torch.complex64 or tensor.dtype == torch.complex128:
assert not torch.any(torch.isneginf(tensor.real)), error_msg
assert not torch.any(torch.isneginf(tensor.imag)), error_msg
else:
assert not torch.any(torch.isneginf(tensor)), error_msg
assert not torch.any(torch.isnan(tensor)), error_msg
def l2_norm(s1, s2):
norm = torch.sum(s1*s2, -1, keepdim=True)
return norm
# source https://arxiv.org/pdf/2008.00264.pdf
class SiSNR(object):
def __call__(self, clean, estimate, eps=1e-8):
dot = l2_norm(estimate, clean)
norm = l2_norm(clean, clean)
        s_target = (dot * clean)/(norm+eps)
        e_noise = estimate - s_target
        target_norm = l2_norm(s_target, s_target)
        noise_norm = l2_norm(e_noise, e_noise)
snr = 10*torch.log10((target_norm)/(noise_norm+eps)+eps)
return torch.mean(snr)
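# Reference for the SiSNR class above (scale-invariant SNR, maximised during training):
#   s_target = (<estimate, clean> / ||clean||^2) * clean
#   e_noise  = estimate - s_target
#   SI-SNR   = 10 * log10(||s_target||^2 / ||e_noise||^2), averaged over the batch.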
# source https://github.com/chanil1218/DCUnet.pytorch/blob/2dcdd30804be47a866fde6435cbb7e2f81585213/train.py
class wSDR(object):
def __call__(self, mixed, clean, clean_est, eps=2e-8):
bsum = lambda x: torch.sum(x, dim=1)
def mSDRLoss(orig, est):
correlation = bsum(orig * est)
energies = torch.norm(orig, p=2, dim=1) * torch.norm(est, p=2, dim=1)
return -(correlation / (energies + eps))
noise = mixed - clean
noise_est = mixed - clean_est
a = bsum(clean**2) / (bsum(clean**2) + bsum(noise**2) + eps)
target_wSDR = a * mSDRLoss(clean, clean_est)
noise_wSDR = (1 - a) * mSDRLoss(noise, noise_est)
wSDR = target_wSDR + noise_wSDR
return torch.mean(wSDR)
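# Reference for the wSDR class above (weighted SDR loss): with cosine similarity
# cos(a, b) = <a, b> / (||a|| * ||b||) and alpha = ||clean||^2 / (||clean||^2 + ||noise||^2),
#   wSDR = -[alpha * cos(clean, clean_est) + (1 - alpha) * cos(noise, noise_est)],
# averaged over the batch, so perfect estimates give a loss of -1.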
def cRM(S, Y, eps=1e-8):
M_r_numer = (Y.real * S.real) + (Y.imag * S.imag)
M_r_denom = torch.square(Y.real) + torch.square(Y.imag)
M_r = M_r_numer / (M_r_denom + eps)
M_i_numer = (Y.real * S.imag) - (Y.imag * S.real)
M_i_denom = torch.square(Y.real) + torch.square(Y.imag)
M_i = M_i_numer / (M_i_denom + eps)
M = torch.complex(M_r, M_i)
return M
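# cRM above computes the complex ideal ratio mask M = S / Y elementwise (complex division
# expanded into real and imaginary parts), so that complex_mat_mult(Y, M) recovers S up to eps.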
def bound_cRM(cRM):
target_noise_mask_mag = torch.abs(cRM)
target_noise_mask_mag_tanh = torch.tanh(target_noise_mask_mag)
target_noise_mag_tanh_real = target_noise_mask_mag_tanh * torch.cos(torch.angle(cRM))
target_noise_mag_tanh_imag = target_noise_mask_mag_tanh * torch.sin(torch.angle(cRM))
target_noise_mask_phase = torch.atan2(target_noise_mag_tanh_imag, target_noise_mag_tanh_real)
target_noise_mask_real = target_noise_mask_mag_tanh * torch.cos(target_noise_mask_phase)
target_noise_mask_imag = target_noise_mask_mag_tanh * torch.sin(target_noise_mask_phase)
return torch.complex(target_noise_mask_real, target_noise_mask_imag)
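# bound_cRM compresses the mask above to a bounded range: the magnitude is squashed with tanh
# (so |M| < 1) while the phase angle is preserved, following common complex-ratio-mask bounding.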
def complex_mat_mult(A, B):
outp_real = (A.real * B.real) - (A.imag * B.imag)
outp_imag = (A.real * B.imag) + (A.imag * B.real)
Y = torch.complex(outp_real, outp_imag)
return Y
def complex_lrelu(input):
# return torch.nn.functional.leaky_relu(input.real) + 1j*torch.nn.functional.leaky_relu(input.imag)
return torch.complex(torch.nn.functional.leaky_relu(input.real), torch.nn.functional.leaky_relu(input.imag))
def apply_complex(fr, fi, input):
# return (fr(input.real)[0]-fi(input.imag)[0]) + 1j*(fr(input.imag)[0]+fi(input.real)[0])
return torch.complex(fr(input.real)-fi(input.imag), (fr(input.imag)+fi(input.real)))
# source https://github.com/huyanxin/DeepComplexCRN/blob/bc6fd38b0af9e8feb716c81ff8fbacd7f71ad82f/complexnn.py
class ComplexLSTM(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional, batch_first, projection_dim=None):
super(ComplexLSTM, self).__init__()
self.input_dim = input_size
self.rnn_units = hidden_size
self.real_lstm = torch.nn.LSTM(input_size=self.input_dim, hidden_size=self.rnn_units, num_layers=num_layers,
bidirectional=bidirectional, batch_first=batch_first)
self.imag_lstm = torch.nn.LSTM(input_size=self.input_dim, hidden_size=self.rnn_units, num_layers=num_layers,
bidirectional=bidirectional, batch_first=batch_first)
if bidirectional:
bidirectional=2
else:
bidirectional=1
if projection_dim is not None:
self.projection_dim = projection_dim
self.r_trans = torch.nn.Linear(self.rnn_units*bidirectional, self.projection_dim)
self.i_trans = torch.nn.Linear(self.rnn_units*bidirectional, self.projection_dim)
else:
self.projection_dim = None
def forward(self, inputs):
        # inputs is expected to be a complex tensor
        real, imag = inputs.real, inputs.imag
r2r_out = self.real_lstm(real)[0]
r2i_out = self.imag_lstm(real)[0]
i2r_out = self.real_lstm(imag)[0]
i2i_out = self.imag_lstm(imag)[0]
real_out = r2r_out - i2i_out
imag_out = i2r_out + r2i_out
if self.projection_dim is not None:
real_out = self.r_trans(real_out)
imag_out = self.i_trans(imag_out)
return torch.complex(real_out, imag_out)
def flatten_parameters(self):
self.imag_lstm.flatten_parameters()
self.real_lstm.flatten_parameters()
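# Sketch of the ComplexLSTM computation above: treating (real_lstm, imag_lstm) as the real and
# imaginary parts A and B of one complex operator W = A + jB, the output for x = x_r + j*x_i is
#   W(x) = (A(x_r) - B(x_i)) + j*(A(x_i) + B(x_r)),
# i.e. complex multiplication carried out with two real-valued LSTMs.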
def mag_phase_2_wave(mag, phase, config):
real = mag * torch.cos(phase)
imag = mag * torch.sin(phase)
comp = torch.complex(real, imag)
comp = torch.nn.functional.pad(comp, (0,0,0,1))
audio = torch.istft(comp, n_fft=config.fft_size, hop_length=config.hop_length, \
win_length=config.window_length, normalized=config.normalise_stft)
return audio
def calc_metric(clean_audio, predict_audio, config, metric):
    metric_arr = []
    for i in range(predict_audio.shape[0]):
        metric_i = metric(clean_audio[i,:].cpu().numpy(), predict_audio[i,:].cpu().numpy(), config.sr)
        if not isnan(metric_i):
            metric_arr.append(metric_i)
    metric_av = float(sum(metric_arr)) / max(len(metric_arr), 1)
    return metric_av
def calc_loss(self, target_noise_mask, predict_noise_mask, \
predict_noise_audio, predict_clean_audio,
noise_audio, noisy_audio, clean_audio):
if self.hparams['noise_loss_type'] == 0:
noise_loss_orig = self.config.L1(target_noise_mask, predict_noise_mask)
elif self.hparams['noise_loss_type'] == 1:
noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio)
elif self.hparams['noise_loss_type'] == 2:
noise_loss_orig = self.config.L1(target_noise_mask, predict_noise_mask) + \
self.config.L1(noise_audio, predict_noise_audio)
elif self.hparams['noise_loss_type'] == 3:
noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
self.config.L1(noise_audio, predict_noise_audio)
elif self.hparams['noise_loss_type'] == 4:
noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
self.config.L1(target_noise_mask, predict_noise_mask)
elif self.hparams['noise_loss_type'] == 5:
if target_noise_mask.dtype == torch.complex32 or target_noise_mask.dtype == torch.complex64 or target_noise_mask.dtype == torch.complex128:
noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
self.config.mse(target_noise_mask.real, predict_noise_mask.real) + \
self.config.mse(target_noise_mask.imag, predict_noise_mask.imag)
else:
noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
self.config.mse(target_noise_mask, predict_noise_mask)
noise_loss = (self.hparams['noise_alpha'] * noise_loss_orig)
if self.hparams['speech_loss_type'] == 0:
speech_loss_orig = -self.config.SiSNR(clean_audio, predict_clean_audio)
elif self.hparams['speech_loss_type'] == 1:
speech_loss_orig_small = torch.mean(self.config.CDPAM.forward(clean_audio, predict_clean_audio))
speech_loss_orig = speech_loss_orig_small * 10e5
speech_loss = (self.hparams['speech_alpha'] * speech_loss_orig)
total_loss = noise_loss + speech_loss
return noise_loss, speech_loss, total_loss
def train_batch_2_loss(self, train_batch, batch_idx, dtype):
noise_data, noisy_data, clean_data, id = train_batch
check_inf_neginf_nan(clean_data, "Found inf, neginf or nan in clean data STFT!")
check_inf_neginf_nan(noise_data, "Found inf, neginf or nan in noise data STFT!")
check_inf_neginf_nan(noisy_data, "Found inf, neginf or nan in noisy data STFT!")
noise_mag = torch.abs(noise_data)
noise_phase = torch.angle(noise_data)
noisy_mag = torch.abs(noisy_data)
noisy_phase = torch.angle(noisy_data)
clean_mag = torch.abs(clean_data)
clean_phase = torch.angle(clean_data)
noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
clean_audio = mag_phase_2_wave(clean_mag, clean_phase, self.config)
if dtype == "real":
target_noise_mask = torch.sigmoid(noise_mag / noisy_mag)
noisy_mag_scaled = (noisy_mag - self.config.data_minR) / (self.config.data_maxR - self.config.data_minR)
predict_noise_mask = self(noisy_mag_scaled)
predict_noise_mag = noisy_mag * predict_noise_mask
predict_clean_mag = noisy_mag - predict_noise_mag
predict_noise_audio = mag_phase_2_wave(predict_noise_mag, noisy_phase, self.config)
predict_clean_audio = mag_phase_2_wave(predict_clean_mag, noisy_phase, self.config)
elif dtype == "complex":
target_noise_mask_out = cRM(noise_data, noisy_data)
target_noise_mask = bound_cRM(target_noise_mask_out)
# noisy_data_standardised = (noisy_data - torch.mean(noisy_data)) / torch.std(noisy_data)
noisy_data_scaled = torch.view_as_complex((2 * ((torch.view_as_real(noisy_data) - self.config.data_minC) /
(self.config.data_maxC - self.config.data_minC))) - 1)
predict_noise_mask_out = self(noisy_data_scaled)
predict_noise_mask = bound_cRM(predict_noise_mask_out)
predict_noise_data = complex_mat_mult(noisy_data, predict_noise_mask)
predict_clean_data = noisy_data - predict_noise_data
predict_noise_audio = mag_phase_2_wave(torch.abs(predict_noise_data), \
torch.angle(predict_noise_data), self.config)
predict_clean_audio = mag_phase_2_wave(torch.abs(predict_clean_data), \
torch.angle(predict_clean_data), self.config)
noise_loss, speech_loss, train_loss = calc_loss(self,
target_noise_mask=target_noise_mask,
predict_noise_mask=predict_noise_mask,
predict_noise_audio=predict_noise_audio,
predict_clean_audio=predict_clean_audio,
noise_audio=noise_audio,
noisy_audio=noisy_audio,
clean_audio=clean_audio)
return noise_loss, speech_loss, train_loss
def val_batch_2_metric_loss(self, val_batch, val_idx, dtype):
noise_data, noisy_data, clean_data, id = val_batch
check_inf_neginf_nan(clean_data, "Found inf, neginf or nan in clean data STFT!")
check_inf_neginf_nan(noise_data, "Found inf, neginf or nan in noise data STFT!")
check_inf_neginf_nan(noisy_data, "Found inf, neginf or nan in noisy data STFT!")
noise_mag = torch.abs(noise_data)
noise_phase = torch.angle(noise_data)
noisy_mag = torch.abs(noisy_data)
noisy_phase = torch.angle(noisy_data)
clean_mag = torch.abs(clean_data)
clean_phase = torch.angle(clean_data)
noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
clean_audio = mag_phase_2_wave(clean_mag, clean_phase, self.config)
if dtype == "real":
target_noise_mask = torch.sigmoid(noise_mag / noisy_mag)
noisy_mag_scaled = (noisy_mag - self.config.data_minR) / (self.config.data_maxR - self.config.data_minR)
predict_noise_mask = self(noisy_mag_scaled)
predict_noise_mag = noisy_mag * predict_noise_mask
predict_clean_mag = noisy_mag - predict_noise_mag
predict_clean_audio = mag_phase_2_wave(predict_clean_mag, noisy_phase, self.config)
predict_noise_audio = mag_phase_2_wave(predict_noise_mag, noisy_phase, self.config)
elif dtype == "complex":
target_noise_mask_out = cRM(noise_data, noisy_data)
target_noise_mask = bound_cRM(target_noise_mask_out)
# noisy_data_standardised = (noisy_data - torch.mean(noisy_data)) / torch.std(noisy_data)
noisy_data_scaled = torch.view_as_complex((2 * ((torch.view_as_real(noisy_data) - self.config.data_minC) /
(self.config.data_maxC - self.config.data_minC))) - 1)
predict_noise_mask_out = self(noisy_data_scaled)
predict_noise_mask = bound_cRM(predict_noise_mask_out)
predict_noise_data = complex_mat_mult(noisy_data, predict_noise_mask)
predict_clean_data = noisy_data - predict_noise_data
predict_clean_audio = mag_phase_2_wave(torch.abs(predict_clean_data), \
torch.angle(predict_clean_data), self.config)
predict_noise_audio = mag_phase_2_wave(torch.abs(predict_noise_data), \
torch.angle(predict_noise_data), self.config)
if platform == "linux":
pesq_av = calc_metric(clean_audio, predict_clean_audio, self.config, pesq)
else:
pesq_av = 1
stoi_av = calc_metric(clean_audio, predict_clean_audio, self.config, stoi)
noise_loss, speech_loss, val_loss = calc_loss(self,
target_noise_mask=target_noise_mask,
predict_noise_mask=predict_noise_mask,
predict_noise_audio=predict_noise_audio,
predict_clean_audio=predict_clean_audio,
noise_audio=noise_audio,
noisy_audio=noisy_audio,
clean_audio=clean_audio)
return noise_loss, speech_loss, val_loss, pesq_av, stoi_av, \
predict_noise_audio, predict_clean_audio, \
noise_audio, noisy_audio, clean_audio
def test_batch_2_metric_loss(self, test_batch, test_idx, dtype):
noise_data, noisy_data, clean_data, id, start_point = test_batch
noise_mag = torch.abs(noise_data)
noise_phase = torch.angle(noise_data)
noisy_mag = torch.abs(noisy_data)
noisy_phase = torch.angle(noisy_data)
clean_mag = torch.abs(clean_data)
clean_phase = torch.angle(clean_data)
noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
clean_audio = mag_phase_2_wave(clean_mag, clean_phase, self.config)
if dtype == "real":
target_noise_mask = torch.sigmoid(noise_mag / noisy_mag)
noisy_mag_scaled = (noisy_mag - self.config.data_minR) / (self.config.data_maxR - self.config.data_minR)
predict_noise_mask = self(noisy_mag_scaled)
predict_noise_mag = noisy_mag * predict_noise_mask
predict_clean_mag = noisy_mag - predict_noise_mag
predict_clean_audio = mag_phase_2_wave(predict_clean_mag, noisy_phase, self.config)
predict_noise_audio = mag_phase_2_wave(predict_noise_mag, noisy_phase, self.config)
elif dtype == "complex":
target_noise_mask_out = cRM(noise_data, noisy_data)
target_noise_mask = bound_cRM(target_noise_mask_out)
# noisy_data_standardised = (noisy_data - torch.mean(noisy_data)) / torch.std(noisy_data)
noisy_data_scaled = torch.view_as_complex((2 * ((torch.view_as_real(noisy_data) - self.config.data_minC) /
(self.config.data_maxC - self.config.data_minC))) - 1)
predict_noise_mask_out = self(noisy_data_scaled)
predict_noise_mask = bound_cRM(predict_noise_mask_out)
predict_noise_data = complex_mat_mult(noisy_data, predict_noise_mask)
predict_clean_data = noisy_data - predict_noise_data
predict_clean_audio = mag_phase_2_wave(torch.abs(predict_clean_data), \
torch.angle(predict_clean_data), self.config)
predict_noise_audio = mag_phase_2_wave(torch.abs(predict_noise_data), \
torch.angle(predict_noise_data), self.config)
noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
if platform == "linux":
pesq_av = calc_metric(clean_audio, predict_clean_audio, self.config, pesq)
else:
pesq_av = 1
stoi_av = calc_metric(clean_audio, predict_clean_audio, self.config, stoi)
noise_loss, speech_loss, test_loss = calc_loss(self,
target_noise_mask=target_noise_mask,
predict_noise_mask=predict_noise_mask,
predict_noise_audio=predict_noise_audio,
predict_clean_audio=predict_clean_audio,
noise_audio=noise_audio,
noisy_audio=noisy_audio,
clean_audio=clean_audio)
return noise_loss, speech_loss, test_loss, pesq_av, stoi_av, \
predict_noise_audio, predict_clean_audio, \
noise_audio, noisy_audio, clean_audio, id, start_point
def epoch_end(self, outputs, type):
no_of_batches = len(outputs)
random_batches = random.choice(no_of_batches, size=min(self.config.val_log_sample_size, no_of_batches), replace=False)
no_of_samples = min(self.config.data_params['batch_size'],
outputs[-1]['clean'].shape[0],
outputs[-1]['predict_clean'].shape[0],
outputs[-1]['noise'].shape[0],
outputs[-1]['predict_noise'].shape[0],
outputs[-1]['noisy'].shape[0])
random_samples = random.choice(no_of_samples, size=min(self.config.val_log_sample_size, no_of_samples), replace=False)
for i, ridx in enumerate(range(min(self.config.val_log_sample_size, no_of_samples))):
clean_sample = outputs[random_batches[ridx]]['clean'][random_samples[ridx],:]
predict_clean_sample = outputs[random_batches[ridx]]['predict_clean'][random_samples[ridx],:]
noise_sample = outputs[random_batches[ridx]]['noise'][random_samples[ridx],:]
predict_noise_sample = outputs[random_batches[ridx]]['predict_noise'][random_samples[ridx],:]
noisy_sample = outputs[random_batches[ridx]]['noisy'][random_samples[ridx],:]
self.logger.experiment.add_audio("clean({})/{}".format(type, i),
clean_sample,
self.global_step,
sample_rate=self.config.sr)
self.logger.experiment.add_audio("predict_clean({})/{}".format(type, i),
predict_clean_sample,
self.global_step,
sample_rate=self.config.sr)
self.logger.experiment.add_audio("noise({})/{}".format(type, i),
noise_sample,
self.global_step,
sample_rate=self.config.sr)
self.logger.experiment.add_audio("predict_noise({})/{}".format(type, i),
predict_noise_sample,
self.global_step,
sample_rate=self.config.sr)
self.logger.experiment.add_audio("noisy({})/{}".format(type, i),
noisy_sample,
self.global_step,
sample_rate=self.config.sr)
class InputMonitor(pl.Callback):
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
if (batch_idx + 1) % trainer.log_every_n_steps == 0:
noise_real = batch[0].real
noise_imag = batch[0].imag
noisy_real = batch[1].real
noisy_imag = batch[1].imag
clean_real = batch[2].real
clean_imag = batch[2].imag
logger = trainer.logger
logger.experiment.add_histogram("noise data real", noise_real, global_step=trainer.global_step)
logger.experiment.add_histogram("noise data imag", noise_imag, global_step=trainer.global_step)
logger.experiment.add_histogram("noisy data real", noisy_real, global_step=trainer.global_step)
logger.experiment.add_histogram("noisy data imag", noisy_imag, global_step=trainer.global_step)
logger.experiment.add_histogram("clean data real", clean_real, global_step=trainer.global_step)
logger.experiment.add_histogram("clean data imag", clean_imag, global_step=trainer.global_step)
class CheckBatchGradient(pl.Callback):
def on_train_start(self, trainer, model):
n = 0
example_input = model.example_input_array.to(model.device)
example_input.requires_grad = True
model.zero_grad()
output = model(example_input)
output[n].abs().sum().backward()
zero_grad_inds = list(range(example_input.size(0)))
zero_grad_inds.pop(n)
if example_input.grad[zero_grad_inds].abs().sum().item() > 0:
raise RuntimeError("Your model mixes data across the batch dimension!")
|
Youzi-ciki/DCS-Net
|
network_functions.py
|
network_functions.py
|
py
| 22,891 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16254773107
|
import pandas as pd
import numpy as np
import random
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
x= ""
def calc_color_indxs(centroids):
    #This function assigns a centroid index to each training example, i.e. it assigns the
    # nearest cluster centroid to each training example.
    # It uses Euclidean distance to measure the distance between cluster centroids and training examples.
global x
centroid_indx = np.zeros(((x.shape[0]),1))
for i in range(0,x.shape[0]):
dist = x[i,:]-centroids
dist = np.sum(np.power(dist,2),axis = 1)
centroid_indx[i] = np.argmin(dist)
return centroid_indx.astype(int)
def calc_cost(centroids,sample_color_indx):
    #calculates the cost of the current centroid assignment.
#cost = average of the distances between the centroids and the assigned training examples
sample_centroids = centroids[sample_color_indx.reshape((sample_color_indx.shape[0]))]
dist = x - sample_centroids
dist = np.sum(np.power(np.sum(np.power(dist,2),axis = 1),0.5),axis = 0)
return dist/sample_centroids.shape[0]
def update_centroids(centroids,sample_color_indx,k):
#updates the centroid for each assigned cluster
    #calculates each centroid by taking the mean of all the examples assigned to the cluster
for i in range(0,k):
indxs = np.where(sample_color_indx == i)
x_centroid = x[indxs[0]]
if x_centroid.shape[0] == 0:
continue
centroids[i] = np.mean(x_centroid,axis = 0)
return centroids
if __name__ == '__main__':
data = load_iris(as_frame = True)
df = data.data
num_of_features = df.shape[1]
x = np.array(df.iloc[1:,0:num_of_features])
k = int(input("Enter Number of Clusters: "))
random_init_indx = random.sample(range(0,df.shape[0]),k)
centroids = np.array(df.iloc[random_init_indx,0:num_of_features])
plt.subplot(1,2,1)
i = 0
#------------------------------------------------------------------
sample_color_indx = calc_color_indxs(centroids) #step1
cost0 = calc_cost(centroids,sample_color_indx)
prev_centroids = centroids
    centroids = update_centroids(centroids,sample_color_indx,k) #step2
plt.scatter(i,cost0)
i = i + 1
#----------------------------------------------------------------
sample_color_indx = calc_color_indxs(centroids) #step1
cost1 = calc_cost(centroids,sample_color_indx) #step2
#--------------------------------------------------------------------
while cost0-cost1>=pow(10,-9):
i = i + 1
plt.scatter(i,cost1)
prev_centroids = centroids
centroids = update_centroids(centroids,sample_color_indx,k)
cost0 = cost1
sample_color_indx = calc_color_indxs(centroids)
cost1 = calc_cost(centroids,sample_color_indx)
print(cost0)
#plots two subplots in a figure,
#1.) Cost funcn vs. no. of iterations
#2.) Plot Training examples of same clusters with same color.
plt.subplot(1,2,2)
sample_color_indx = calc_color_indxs(prev_centroids)
colors = plt.cm.Spectral(np.linspace(0,1,k))
for i,col in zip(range(k),colors):
indxs = np.where(sample_color_indx == i)
x_centroid = x[indxs[0]]
plt.scatter(x_centroid[:,0],x_centroid[:,1],color = col)
plt.show()
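# Equivalence sketch (assumption: scikit-learn is available, as it already is for load_iris):
#   from sklearn.cluster import KMeans
#   labels = KMeans(n_clusters=k, init='random', n_init=1).fit_predict(x)
# gives comparable clusters; note scikit-learn minimises inertia (sum of squared distances)
# while calc_cost above uses the mean Euclidean distance.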
|
DhyeyDabhi/Machine-Learning
|
K Means Clustering/Logic Code/KMeans.py
|
KMeans.py
|
py
| 3,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21480313260
|
import bpy
import re
from ..helpers import sentence_join
default_lock = False
default_lock_array = [default_lock] * 3
component_names = ('X', 'Y', 'Z', 'W')
def is_prop_locked(pb, name, component_index):
if name == 'location':
return getattr(pb, 'lock_location', default_lock_array)[component_index]
elif name in {'rotation_euler', 'rotation_quaternion', 'rotation_axis_angle'}:
if component_index < 3:
return getattr(pb, 'lock_rotation', default_lock_array)[component_index]
else:
return getattr(pb, 'lock_rotation_w', default_lock)
elif name == 'scale':
return getattr(pb, 'lock_scale', default_lock_array)[component_index]
class GRET_OT_channels_delete_unavailable(bpy.types.Operator):
"""Delete location/rotation/scale channels that are locked in the transform panel"""
bl_idname = 'gret.channels_delete_unavailable'
bl_label = "Delete Unavailable Channels"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.space_data and context.space_data.type in {'DOPESHEET_EDITOR', 'GRAPH_EDITOR'}
def execute(self, context):
obj = context.active_object
action = obj.animation_data.action if (obj and obj.animation_data) else None
if not action:
return {'CANCELLED'}
remove_fcurves = []
delete_invalid = False
num_invalid = num_locked = 0
for fc in action.fcurves:
try:
obj.path_resolve(fc.data_path)
except ValueError:
if delete_invalid:
print(f"Removing curve, can't resolve {fc.data_path}")
remove_fcurves.append(fc)
num_invalid += 1
continue
pb_match = re.match(r'^pose\.bones\[\"([^\"]+)"\]\.(\w+)$', fc.data_path)
if pb_match:
pb = obj.pose.bones.get(pb_match[1])
prop_name = pb_match[2]
if pb and is_prop_locked(pb, prop_name, fc.array_index):
print(f"Removing curve, bone {pb.name} {component_names[fc.array_index]} "
f"{prop_name} is locked")
remove_fcurves.append(fc)
num_locked += 1
continue
for fc in remove_fcurves:
action.fcurves.remove(fc)
num_removed_str = sentence_join([
f"{num_invalid} invalid" if num_invalid else "",
f"{num_locked} locked transform" if num_locked else "",
])
if num_removed_str:
self.report({'INFO'}, f"Removed {num_removed_str} curves.")
return {'FINISHED'}
def draw_menu(self, context):
self.layout.operator(GRET_OT_channels_delete_unavailable.bl_idname)
def register(settings, prefs):
if not prefs.animation__enable_channels_delete_unavailable:
return False
bpy.utils.register_class(GRET_OT_channels_delete_unavailable)
bpy.types.GRAPH_MT_channel.append(draw_menu)
bpy.types.DOPESHEET_MT_channel.append(draw_menu)
def unregister():
bpy.types.GRAPH_MT_channel.remove(draw_menu)
bpy.types.DOPESHEET_MT_channel.remove(draw_menu)
bpy.utils.unregister_class(GRET_OT_channels_delete_unavailable)
|
greisane/gret
|
anim/channels_delete_unavailable.py
|
channels_delete_unavailable.py
|
py
| 3,374 |
python
|
en
|
code
| 298 |
github-code
|
6
|
29841914634
|
import threading
import traitlets
import pyrosetta
import pyrosetta.rosetta.basic.options
import pyrosetta.rosetta.protocols.rosetta_scripts as rosetta_scripts
import pyrosetta.rosetta.protocols.moves as moves
import pyrosetta.distributed
import pyrosetta.distributed.tasks.taskbase as taskbase
import pyrosetta.distributed.packed_pose as packed_pose
def validate(protocol_xml):
"""Perform schema and parse validation for the given protocol xml."""
    test_task = BaseRosettaScriptsTask(protocol_xml)
    test_task.maybe_setup()
class BaseRosettaScriptsTask(taskbase.TaskBase):
@property
@pyrosetta.distributed.requires_init
@pyrosetta.distributed.with_lock
def parser(self):
if not getattr(self, "_parser", None):
BaseRosettaScriptsTask._parser = \
rosetta_scripts.RosettaScriptsParser()
return self._parser
protocol_xml = traitlets.CUnicode()
def __init__(self, protocol_xml):
super().__init__(protocol_xml=protocol_xml)
@pyrosetta.distributed.requires_init
@pyrosetta.distributed.with_lock
def setup(self):
self.default_options = pyrosetta.rosetta.basic.options.process()
self.tag = self.parser.create_tag_from_xml_string(
self.protocol_xml, self.default_options)
# Validate by parsing
self.parser.parse_protocol_tag(self.tag, self.default_options)
self.protocol_lock = threading.Lock()
@property
@pyrosetta.distributed.requires_init
@pyrosetta.distributed.with_lock
def parsed_protocol(self):
return self.parser.parse_protocol_tag(self.tag, self.default_options)
def execute(self, pack_or_pose):
return packed_pose.to_packed(self.apply(pack_or_pose))
class MultioutputRosettaScriptsTask(BaseRosettaScriptsTask):
@pyrosetta.distributed.requires_init
def apply(self, pack_or_pose):
"""Apply task generating pose objects."""
protocol = self.parsed_protocol
wpose = packed_pose.to_pose(pack_or_pose)
with self.protocol_lock:
protocol.apply(wpose)
if protocol.get_last_move_status() != moves.MoverStatus.MS_SUCCESS:
return
while wpose:
yield wpose
wpose = protocol.get_additional_output()
class SingleoutputRosettaScriptsTask(BaseRosettaScriptsTask):
@pyrosetta.distributed.requires_init
def apply(self, pack_or_pose):
"""Apply task returning a pose object."""
protocol = self.parsed_protocol
wpose = packed_pose.to_pose(pack_or_pose)
with self.protocol_lock:
protocol.apply(wpose)
if protocol.get_last_move_status() != moves.MoverStatus.MS_SUCCESS:
return
else:
return wpose
|
MedicaicloudLink/Rosetta
|
main/source/src/python/PyRosetta/src/pyrosetta/distributed/tasks/rosetta_scripts.py
|
rosetta_scripts.py
|
py
| 2,892 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2279604404
|
from Sentence import Sentence
import nltk
class Text:
def __init__(self, rawText, name):
        self.rawText = rawText  # self.formatText(rawText)
self.name = name
splitAtNewlines = [s.strip() for s in rawText.splitlines()]
rawSentences = []
for line in splitAtNewlines:
sentencesInLine = nltk.sent_tokenize(line)
rawSentences.extend(sentencesInLine)
self.sentences = []
for rawSentence in rawSentences:
sentence = Sentence(self, rawSentence)
self.sentences.append(sentence)
def formatText(self, rawText):
return rawText.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\xa9", "e").replace(u"\u2014","-").decode("utf8")
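# Note: formatText() is currently unused (the call above is commented out) and its trailing
# .decode("utf8") is a Python 2 leftover that would raise AttributeError on a Python 3 str,
# so it would need to be dropped before re-enabling the call.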
|
Lombre/LemmaLearner
|
Text.py
|
Text.py
|
py
| 745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75336194426
|
import requests
import collections
import secrets
import json
import sqlite3
import scrape
from bs4 import BeautifulSoup
API_KEY = secrets.API_KEY
headers = {
"Authorization": "Bearer %s" % API_KEY
}
BASEURL = 'https://api.yelp.com/v3/businesses/search'
CACHE_DICT = {}
CACHE_FILENAME = 'search_cache.json'
DB_NAME = 'yelp.sqlite'
class Filter:
def __init__(self):
self.cities = []
self.terms = []
def add_city(self, city):
if city not in self.cities:
self.cities.append(city)
def add_term(self, t):
if t not in self.terms:
self.terms.append(t)
def show_city_list(self):
return self.cities
def show_term_list(self):
return self.terms
class Business:
def __init__(self, business):
self.business_name = business.get('name', '')
self.yelp_id = business.get('id', '')
self.city = business.get('location', {}).get('city', '')
self.phone_number = business.get('phone', '')
self.review_count = business.get('review_count', -1)
self.rating = business.get('rating', -1)
self.price = business.get('price', '').count('$')
self.url = business.get('url', '')
self.address = business.get('location', {}).get('address1', '')
        if self.business_name is None: self.business_name = ''
        if self.yelp_id is None: self.yelp_id = ''
        if self.city is None: self.city = ''
        if self.phone_number is None: self.phone_number = ''
        if self.review_count is None: self.review_count = -1
        if self.rating is None: self.rating = -1
        if self.price is None: self.price = 0
        if self.url is None: self.url = ''
        if self.address is None: self.address = ''
self.category = ['NULL'] * 3
if 'categories' in business:
for i in range(min(3, len(business['categories']))):
self.category[i] = business['categories'][i]['title']
self.pic, self.review = scrape.get_info_from_url(self.url)
def get_business_info(self):
return [self.yelp_id, self.business_name, self.city, self.phone_number,
self.review_count, self.rating, self.price, self.address, self.url]
def get_category_info(self):
return [self.yelp_id] + self.category
def open_cache():
''' Opens the cache file if it exists and loads the JSON into
the CACHE_DICT dictionary.
if the cache file doesn't exist, creates a new cache dictionary
Args:
None
Returns:
cache_dict (dict): The opened cache.
'''
try:
cache_file = open(CACHE_FILENAME, 'r')
cache_contents = cache_file.read()
cache_dict = json.loads(cache_contents)
cache_file.close()
except:
cache_dict = {}
return cache_dict
def save_cache(cache_dict):
''' Saves the current state of the cache to disk
Args:
cache_dict (dict): The dictionary to save.
Returns:
None
'''
dumped_json_cache = json.dumps(cache_dict)
fw = open(CACHE_FILENAME, "w")
fw.write(dumped_json_cache)
fw.close()
def construct_unique_key(baseurl, params):
''' constructs a key that is guaranteed to uniquely and
repeatably identify an API request by its baseurl and params
Args:
baseurl (str): The URL for the API endpoint.
params (dict): A dictionary of param:value pairs.
Returns:
unique_key (str): The unique key as a string.
'''
param_string = []
connector = "_"
for k in params.keys():
param_string.append(f"{k}_{params[k]}")
param_string.sort()
unique_key = baseurl + connector + connector.join(param_string)
return unique_key
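# Example of the key produced above for construct_unique_key(BASEURL,
# {'term': 'barbecue', 'location': 'new+york', 'limit': 50}):
#   https://api.yelp.com/v3/businesses/search_limit_50_location_new+york_term_barbecue
# (params are sorted alphabetically so the same query always maps to the same cache entry).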
def make_request(baseurl, params):
'''Make a request to the Web API using the baseurl and params
    Args:
baseurl (str): The URL for the API endpoint.
params (dict): A dictionary of param:value pairs.
Returns:
results (dict): The JSON response from the request.
'''
response = requests.get(baseurl, params=params, headers=headers)
results = response.json()
return results
def make_request_with_cache(baseurl, term='', location='', count=50):
''' Check the cache for a saved result for this baseurl+params:values
combo. If the result is found, return it. Otherwise send a new
request, save it, then return it.
Args:
baseurl (str): The URL for the API endpoint
        term (str): The search term passed to the API.
        location (str): The search location passed to the API.
count (int): The number of business results to return.
Return:
results (dict): The JSON response from the request.
'''
params = {
'term': term.lower().replace(" ", "+"),
'location': location.lower().replace(" ", "+"),
'limit': count
}
request_key = construct_unique_key(baseurl=baseurl, params=params)
if request_key in CACHE_DICT:
# The data has been fetched before and stored in the cache
return CACHE_DICT[request_key]
else:
results = make_request(baseurl=baseurl, params=params)
CACHE_DICT[request_key] = results
save_cache(cache_dict=CACHE_DICT)
return results
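# Hedged usage sketch (added): the first call below would hit the Yelp API and persist the
# response through save_cache; the second identical call is answered from CACHE_DICT with
# no network request. BASEURL, headers and CACHE_DICT are assumed to be defined near the
# top of this file, and a valid API key is required for the live request to succeed.
def _example_cached_search():
    first = make_request_with_cache(BASEURL, term='coffee', location='Ann Arbor', count=5)
    again = make_request_with_cache(BASEURL, term='coffee', location='Ann Arbor', count=5)
    return first, again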
def write_to_business_single(info):
''' Write a row into business_info table
Args:
info (list): A list of business information.
Returns:
None
'''
insert_instructors = '''
INSERT OR REPLACE INTO business_info
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
'''
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(insert_instructors, info)
connection.commit()
connection.close()
def write_to_category_single(info):
''' Write a row into category_info table.
Args:
info (list): A list of category info of one business.
Returns:
None
'''
insert_instructors = '''
INSERT OR REPLACE INTO category_info
VALUES (?, ?, ?, ?)
'''
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(insert_instructors, info)
connection.commit()
connection.close()
def write_to_business(business_list):
    ''' Write multiple rows into business_info table.
    Args:
        business_list (list): A list of Business objects.
    Returns:
        None
    '''
    for busi_obj in business_list:
        write_to_business_single(busi_obj.get_business_info())
def write_to_category(business_list):
    ''' Write multiple rows into category_info table.
    Args:
        business_list (list): A list of Business objects.
    Returns:
        None
    '''
    for busi_obj in business_list:
        write_to_category_single(busi_obj.get_category_info())
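# Assumed schema sketch (added): the INSERT statements above expect business_info and
# category_info tables created elsewhere in the project. The column names below are
# guesses that simply mirror the ordering of get_business_info() and get_category_info();
# the real schema may differ.
CREATE_TABLES_SKETCH = '''
    CREATE TABLE IF NOT EXISTS business_info (
        yelp_id TEXT PRIMARY KEY, name TEXT, city TEXT, phone TEXT,
        review_count INTEGER, rating REAL, price INTEGER, address TEXT, url TEXT
    );
    CREATE TABLE IF NOT EXISTS category_info (
        yelp_id TEXT PRIMARY KEY, category1 TEXT, category2 TEXT, category3 TEXT
    );
'''
def _create_tables_sketch():
    connection = sqlite3.connect(DB_NAME)
    connection.executescript(CREATE_TABLES_SKETCH)
    connection.commit()
    connection.close()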
def get_business_list(term='', location='', count=50):
    ''' Fetch the data through the API and process the JSON
    response into a list of Business objects.
    Args:
        term (str): The search term passed to the API.
        location (str): The search location passed to the API.
count (int): The number of business results to return.
Returns:
business_list (list): A list of business objects.
'''
results = make_request_with_cache(baseurl=BASEURL,
term=term, location=location, count=count)
business_info = results['businesses']
business_list = []
for business in business_info:
business_list.append(
Business(business)
)
return business_list
if __name__ == '__main__':
# term = 'barbecue'
# city = 'New York'
# busi_list = get_business_list(term=term, location=city, count=50)
# write_to_business_single(busi_list[31].get_business_info())
f = Filter()
f.add_term('Chinese restaurants')
f.add_term('Japanese restaurants')
f.add_term('Indian restaurants')
f.add_term('Mediterranean restaurants')
f.add_term('breakfast')
f.add_term('barbecue')
f.add_term('coffee')
f.add_term('noodles')
f.add_term('food')
f.add_term('hamburger')
f.add_term('sandwich')
f.add_term('bubble tea')
f.add_term('taco')
f.add_term('dumplings')
f.add_term('Korean')
f.add_term('sushi')
f.add_term('ramen')
f.add_term('curry')
f.add_term('cocktail')
f.add_term('bar')
f.add_term('seafood')
f.add_term('hot pot')
f.add_term('steak')
f.add_term('Vegetarian')
f.add_city('San Francisco')
f.add_city('Seattle')
f.add_city('New York')
f.add_city('Ann Arbor')
f.add_city('San Jose')
f.add_city('Boston')
f.add_city('Los Angeles')
f.add_city('Las Vegas')
f.add_city('Chicago')
f.add_city('Washington')
f.add_city('Detroit')
for term in f.show_term_list():
for city in f.show_city_list():
print(term, city)
busi_list = get_business_list(term=term, location=city, count=50)
write_to_business(busi_list)
write_to_category(busi_list)
| kedongh/507_final_proj | yelp.py | yelp.py | py | 8,122 | python | en | code | 0 | github-code | 6 | 32181991364 |
'''
- Change tree from octree to R-tree. or study other balanced trees
- If octree pool overflows it should create a second pool
'''
from Framework.Segmentation.SegmentationHandler import *
from Framework.Segmentation.ROI import ROI
from Framework.Tools.shunting_yard import shuntingYard
from Framework.Tools.DataStructures import OctreePointBased
class ROISegmentation(SegmentationHandler):
# @averageTimeit
def __init__(self, bundle, shaderDict):
super().__init__(bundle, shaderDict)
self.segmentationIdentifier = SegmentationTypes.ROIs
self.tree = None
self.roiValidator = []
self.rois = []
# self.fiberSizes = bundle.fiberSizes
for sibling in self.parent.children:
if isinstance(sibling, ROISegmentation):
self.tree = sibling.tree
        if self.tree is None:
self.tree = OctreePointBased(self.points, self.fiberSizes)
self.fileName = 'ROI Segmentation' # temporal
self.alpha = 0.8
self.validLogic = False
self.configFiberValidator()
self._loadBuffers()
self.buildVertex2Fiber()
self.vboAndLinkVertex2Fiber()
self.boundingbox = BoundingBox(shaderDict, self, bundle.boundingbox.dims, bundle.boundingbox.center)
# @timeit
def segmentMethod(self):
rois2beQuery = None
roisResults = None
if self.validLogic:
rois2beQuery = [self.rois[i] for i in self.logicRois]
else:
rois2beQuery = [self.rois[i] for i in [i for i, e in enumerate(self.roiValidator) if e]]
n = len(rois2beQuery)
if n == 0:
self.fiberValidator[:self.curvescount] = 1
else:
roisResults = np.zeros((n, self.curvescount), dtype=np.int8)
dt = np.dtype([ ('center', np.float32, 3),
('radius', np.float32, 3),
('roiType', np.int32, (1,))])
dataPacked = np.empty(len(rois2beQuery), dtype=dt)
for i in range(len(rois2beQuery)):
dataPacked[i]['center'] = rois2beQuery[i].getCenter(self.inverseModel)
dataPacked[i]['radius'] = rois2beQuery[i].getRadius(self.inverseModel)
dataPacked[i]['roiType'] = rois2beQuery[i].getROIValue()
self.tree.queryCollision(dataPacked, roisResults)
if self.validLogic:
infix = [roisResults[self.logicRois.index(i)] if isinstance(i, int) else i for i in self.logicInfix]
self.fiberValidator[:self.curvescount] = shuntingYard(infix)
else:
                self.fiberValidator[:self.curvescount] = roisResults.sum(axis=0, dtype=np.int8)  # int8 could overflow with 128 or more ROIs
def addROI(self, roi):
        '''Register a ROI as a child of this segmentation, skipping duplicates.'''
if isinstance(roi, ROI) and not roi in self.rois:
self.children.append(roi)
self.rois.append(roi)
self.roiValidator.append(True)
def removeROIFromIndex(self, index):
roi = self.rois.pop(index)
self.roiValidator.pop(index)
self.children.remove(roi)
def setValidatorAtIndex(self, index, validate):
self.roiValidator[index] = validate
def updateLogic(self, logicStr):
self.validLogic = False
if logicStr == '':
raise ValueError('ROISegmentation: Empty logic string.')
parsed = self.parseLogicString(logicStr)
if not parsed:
raise ValueError('ROISegmentation: Not a valid string for logic operations.')
self.logicInfix = parsed
self.logicRois = list(set([i for i in parsed if isinstance(i, int)]))
if max(self.logicRois) >= len(self.rois):
raise ValueError('ROISegmentation: Not a valid ID for ROI selection. ID: {}.'.format(max(self.logicRois)))
self.validLogic = True
def parseLogicString(self, logicStr):
''' Could get a list of valid ids '''
numberStack = ''
infix = []
for i in logicStr:
if i.isdigit():
numberStack += i
elif i == '|' or i == '&' or i == '^' or i == '(' or i == ')' or i == '!':
if numberStack != '':
infix.append(int(numberStack))
if infix[-1] >= len(self.rois):
return False
numberStack = ''
infix.append(i)
elif i == '+':
if numberStack != '':
infix.append(int(numberStack))
if infix[-1] >= len(self.rois):
return False
numberStack = ''
infix.append('|')
else:
return False
if numberStack != '':
infix.append(int(numberStack))
return infix
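# Added sketch, not part of the original class: a minimal check of how parseLogicString
# tokenizes a ROI logic expression for shuntingYard. ROISegmentation normally needs a
# bundle and a shader dictionary; here only the single attribute the parser reads (rois)
# is faked with SimpleNamespace, so this is an assumption-driven demo rather than real use.
def _parse_logic_example():
    from types import SimpleNamespace
    fake = SimpleNamespace(rois=[object(), object(), object()])
    assert ROISegmentation.parseLogicString(fake, '0&(1|2)') == [0, '&', '(', 1, '|', 2, ')']
    assert ROISegmentation.parseLogicString(fake, '0+2') == [0, '|', 2]  # '+' is an alias for OR
    assert ROISegmentation.parseLogicString(fake, '0 & 1') is False      # spaces are rejected
    assert ROISegmentation.parseLogicString(fake, '5|1') is False        # ROI id out of range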
| GonzaloSabat/phybers | phybers/src/utils/fibervis/Framework/Segmentation/ROISegmentation.py | ROISegmentation.py | py | 4,067 | python | en | code | 0 | github-code | 6 | 22389699971 |
# -*- coding: utf-8 -*-
# (C) 2013 Smile (<http://www.smile.fr>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models, _
from odoo.tools import format_date
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def action_invoice_open(self):
if not self._context.get('force_invoice_open'):
invoices_in_error, errors = self._check_unvalid_taxes()
if invoices_in_error:
return {
'type': 'ir.actions.act_window',
                    'name': _('Invalid taxes'),
'res_model': 'account.invoice.tax.wizard',
'view_mode': 'form',
'view_id': False,
'res_id': False,
'context': {
'default_invoices_in_error':
repr(invoices_in_error.ids),
'default_errors': '\n'.join(errors),
},
'target': 'new',
}
return super(AccountInvoice, self).action_invoice_open()
@api.multi
def _check_unvalid_taxes(self):
invoices_in_error, errors = self.browse(), []
for invoice in self:
date_invoice = invoice.date_invoice or fields.Date.today()
for tax in invoice.mapped('invoice_line_ids.invoice_line_tax_ids'):
if tax.date_start and date_invoice < tax.date_start:
invoices_in_error |= invoice
errors.append(
_('The tax %s shall apply from %s') %
(tax.name, format_date(self.env, tax.date_start)))
if tax.date_stop and date_invoice > tax.date_stop:
invoices_in_error |= invoice
errors.append(
_('The tax %s shall apply to %s') %
(tax.name, format_date(self.env, tax.date_stop)))
return invoices_in_error, errors
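# Added sketch, not part of the Odoo addon: the validity-window rule used by
# _check_unvalid_taxes, reproduced on plain values so it can be reasoned about without an
# Odoo registry. date_start/date_stop mirror the tax fields this module assumes to exist.
def _tax_applies(date_invoice, date_start=None, date_stop=None):
    """Return True when date_invoice falls inside the tax validity window."""
    if date_start and date_invoice < date_start:
        return False
    if date_stop and date_invoice > date_stop:
        return False
    return True
# e.g. _tax_applies('2024-03-01', '2024-01-01', '2024-12-31') is True,
# while _tax_applies('2025-01-15', '2024-01-01', '2024-12-31') is False.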
| detian08/bsp_addons | smile/smile_account_tax_period/models/account_invoice.py | account_invoice.py | py | 2,040 | python | en | code | 1 | github-code | 6 | 41034447740 |
from django.test import TestCase
from car import models
class ModelTest(TestCase):
    def test_create_car_successful(self):
        """Test creating a new car is successful"""
category = 'CO'
model = "TT RS 2020"
name = 'Audi TT RS TURBO'
number_of_doors = 3
description = 'This car is a beast'
car = models.Car.objects.create(
category=category,
model=model,
name=name,
number_of_doors=number_of_doors,
description=description
)
self.assertEqual(car.category, category)
self.assertEqual(car.model, model)
self.assertEqual(car.name, name)
self.assertEqual(car.number_of_doors, number_of_doors)
self.assertEqual(car.description, description)
| Womencancode/technical-test-Talana | app/car/tests/test_models.py | test_models.py | py | 814 | python | en | code | 0 | github-code | 6 | 27447292826 |
import time
from selenium.webdriver.support.ui import Select
from selenium import webdriver
class InventoryPage():
def __init__(self,driver) :
self.driver = driver
def navigate(self, urlLogin):
self.driver.get(urlLogin)
def changeSorting(self, locatorClass, option):
self.sel = Select (self.driver.find_element_by_class_name (locatorClass))
self.sel.select_by_value (option)
    def check_A_to_Z_sort(self):
        items_names = self.driver.find_elements_by_class_name("inventory_item_name")
        names_list = []
        for name in items_names:
            name_text = name.text
            print(name_text)
            names_list.append(name_text)
        sorted_names = sorted(names_list)
        if names_list == sorted_names:
            print("'A_to_Z' sorting working")
        else:
            print("'A_to_Z' sorting not working")
    def check_Z_to_A_sort(self):
        items_names = self.driver.find_elements_by_class_name("inventory_item_name")
        names_list = []
        for name in items_names:
            name_text = name.text
            print(name_text)
            names_list.append(name_text)
        reversed_names = sorted(names_list, reverse=True)
        if names_list == reversed_names:
            print("'Z_to_A' sorting working")
        else:
            print("'Z_to_A' sorting not working")
    def check_low_to_high_sort(self):
        items_prices = self.driver.find_elements_by_class_name("inventory_item_price")
        prices_values = []
        for price in items_prices:
            price_text = price.text.replace('$', '')
            prices_values.append(float(price_text))
        sorted_prices = sorted(prices_values)
        if prices_values == sorted_prices:
            print("'low_to_high' sorting working")
        else:
            print("'low_to_high' sorting not working")
    def check_high_to_low_sort(self):
        items_prices = self.driver.find_elements_by_class_name("inventory_item_price")
        prices_values = []
        for price in items_prices:
            price_text = price.text.replace('$', '')
            prices_values.append(float(price_text))
        sorted_prices = sorted(prices_values, reverse=True)
        if prices_values == sorted_prices:
            print("'high_to_low' sorting working")
        else:
            print("'high_to_low' sorting not working")
def click_item_page_and_verify(self,item_full_id):
self.driver.find_element_by_id(item_full_id).click()
item_id = item_full_id[5]
currentURL = self.driver.current_url
assert currentURL == "https://www.saucedemo.com/inventory-item.html?id=" + str(item_id)
print("item page " + str(item_id)+" opened")
def click_item_to_cart_and_verify(self,item_id):
self.driver.find_element_by_id(item_id).click()
item_shopped = self.driver.find_element_by_class_name("shopping_cart_badge")
assert int(item_shopped.text) == 1
self.driver.find_element_by_class_name("shopping_cart_badge").click()
time.sleep(2)
self.driver.find_element_by_id("checkout").click()
time.sleep(2)
currentURL = self.driver.current_url
assert currentURL == "https://www.saucedemo.com/checkout-step-one.html"
print("check out page opened")
| Abanoub-waheed/python_test | inventoryPage.py | inventoryPage.py | py | 3,580 | python | en | code | 0 | github-code | 6 | 70096737469 |
import itertools
# Primality test
# Anything below 2 is not prime.
# 2 and 3 are prime.
# Numbers divisible by 2 or 3 are not prime.
# Below 10, it is enough to rule out divisibility by 2 and 3.
# Larger numbers only need trial division by odd candidates; multiples of 3 are already
# handled above, so it suffices to test divisors of the form 6k-1 and 6k+1 (5, 7, 11, 13, ...).
# To decide whether N is prime, testing divisors up to sqrt(N) is enough.
def is_prime(n):
if n < 2:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
if n < 9:
return True
k, l = 5, n ** 0.5
while k <= l:
if n % k == 0 or n % (k+2) == 0:
return False
k += 6
return True
def solution(nums):
answer = 0
nums = list(itertools.combinations(nums,3))
for i in nums:
n = sum(i)
if is_prime(n):
answer += 1
return answer
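# Quick self-check (added, not part of the original submission): the two sample cases of
# the Programmers "making primes" problem, verified against the brute force above.
if __name__ == "__main__":
    assert solution([1, 2, 3, 4]) == 1       # only 1 + 2 + 4 = 7 is prime
    assert solution([1, 2, 7, 6, 4]) == 4    # 7, 11, 13 and 17 can be formed
    print("solution() passed the sample cases")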
| YooGunWook/coding_test | practice_coding_old/연습문제/소수 만들기.py | 소수 만들기.py | py | 1,070 | python | ko | code | 0 | github-code | 6 | 31496165105 |
import unittest
import socket
class TestServerConnection(unittest.TestCase):
    def setUp(self):
        # Create a local UDP "server" socket so the test is self-contained
        # (binding port 333 may require elevated privileges on some systems)
        self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server.bind(("127.0.0.1", 333))
        self.server.settimeout(2)
        self.client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def tearDown(self):
        self.client.close()
        self.server.close()
    def test_server_connection(self):
        # Send a message to the server and check that it is received unchanged
        self.client.sendto(b"Hello, server!", ("127.0.0.1", 333))
        received_message, _ = self.server.recvfrom(1024)
        self.assertEqual(received_message, b"Hello, server!",
                         "Message not received correctly by the server")
# Build the test suite
def suite():
    return unittest.TestLoader().loadTestsFromTestCase(TestServerConnection)
# Run the suite only when the file is executed directly
if __name__ == "__main__":
    unittest.TextTestRunner().run(suite())
| Trincazul/server-upd | src/test/test_server.py | test_server.py | py | 701 | python | pt | code | 0 | github-code | 6 | 24293577173 |
from copy import deepcopy
def courses_to_take(course_to_pre_reqs):
if not course_to_pre_reqs:
return []
for course in course_to_pre_reqs:
ret = list()
        if not course_to_pre_reqs[course]:
            ret.append(course)
next_pre_reqs = deepcopy(course_to_pre_reqs)
del next_pre_reqs[course]
if not next_pre_reqs:
return ret
for c in next_pre_reqs:
if course in next_pre_reqs[c]:
next_pre_reqs[c].remove(course)
extra = courses_to_take(next_pre_reqs)
if extra is not None:
return ret + extra
return None
def main():
courses = {
'CSC300': ['CSC100', 'CSC200'],
'CSC200': ['CSC100'],
'CSC100': []
}
assert courses_to_take(courses) == ['CSC100', 'CSC200', 'CSC300']
prereqs = {
'CSC400': ['CSC200'],
'CSC300': ['CSC100', 'CSC200'],
'CSC200': ['CSC100'],
'CSC100': []
}
assert courses_to_take(prereqs) == ['CSC100', 'CSC200', 'CSC400', 'CSC300']
prereqs = {
'CSC400': ['CSC300'],
'CSC300': ['CSC100', 'CSC200'],
'CSC200': ['CSC100'],
'CSC100': ['CSC400']
}
assert not courses_to_take(prereqs)
if __name__ == '__main__':
main()
| ckallum/Daily-Interview-Pro | solutions/courseCodes.py | courseCodes.py | py | 1,367 | python | en | code | 16 | github-code | 6 | 33548045927 |
from django.test import TestCase
from costcenter.forms import FundForm
class FundFormTest(TestCase):
def test_empty_form(self):
form = FundForm()
self.assertIn("fund", form.fields)
self.assertIn("name", form.fields)
self.assertIn("vote", form.fields)
self.assertIn("download", form.fields)
# test just one rendered field
self.assertInHTML(
'<input type="text" name="fund" maxlength="4" required id="id_fund">',
str(form),
)
def test_filled_form(self):
data = {"fund": "C119", "name": "National Procurement", "vote": 1, "download": True}
f = FundForm(data=data)
self.assertTrue(f.is_valid())
def test_vote_not_1_or_5(self):
data = {"fund": "C113", "name": "NP", "vote": "6", "download": 1}
form = FundForm(data=data)
self.assertEqual(form.errors["vote"], ["Vote must be 1 or 5"])
def test_fund_starts_with_non_letter(self):
data = {"fund": "3113"}
form = FundForm(data=data)
self.assertEqual(form.errors["fund"], ["Fund must begin with a letter"])
def test_fund_is_not_4_characters_long(self):
data = {"fund": "c3456"}
form = FundForm(data=data)
msg = f"Ensure this value has at most 4 characters (it has {len(data['fund'])})."
self.assertEqual(form.errors["fund"], [msg])
| mariostg/bft | costcenter/tests/test_forms.py | test_forms.py | py | 1,394 | python | en | code | 0 | github-code | 6 | 44497013120 |
from traceback import print_stack
from allure_commons.types import AttachmentType
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException
import allure
import SeleniumFrameWork.utilities.CustomLogger as cl
class BaseClass:
log = cl.customLogger()
def __init__(self, driver):
self.driver = driver
def launchWebPage(self, url, title):
try:
self.driver.get(url)
assert title in self.driver.title
self.log.info("Web Page Launch with " + url)
except:
self.log.info("Web Page Not Launch with " + url)
def getLocatorType(self, locatorType):
locatorType = locatorType.lower()
if locatorType == "id":
return By.ID
elif locatorType == "name":
return By.NAME
elif locatorType == "class":
return By.CLASS_NAME
elif locatorType == "link":
return By.LINK_TEXT
elif locatorType == "xpath":
return By.XPATH
elif locatorType == "css":
return By.CSS_SELECTOR
elif locatorType == "tag":
return By.TAG_NAME
elif locatorType == "plink":
return By.PARTIAL_LINK_TEXT
else:
self.log.error(f"Locator Type {locatorType} entered not found")
print_stack()
return False
def getWebElement(self, locatorValue, locatorType="id"):
webElement = None
try:
locatorType = locatorType.lower()
locatorByType = self.getLocatorType(locatorType)
webElement = self.driver.find_element(locatorByType, locatorValue)
self.log.info(f"Web Element found with locator value {locatorValue} using locator type {locatorByType}")
except:
self.log.error(
f"Web Element Not found with locator value {locatorValue} using locator type {locatorByType}")
print_stack()
return webElement
def waitForElement(self, locatorValue, locatorType="id"):
webElement = None
try:
locatorType = locatorType.lower()
locatorByType = self.getLocatorType(locatorType)
wait = WebDriverWait(self.driver, 25, poll_frequency=1,
ignored_exceptions=[NoSuchElementException, ElementNotVisibleException,
ElementNotSelectableException])
# webElement = self.driver.find_element(locatorByType, locatorValue)
webElement = wait.until(lambda x: x.find_element(locatorByType, locatorValue))
self.log.info(f"Web Element found with locator value {locatorValue} using locator type {locatorByType}")
except:
self.log.error(
f"Web Element Not found with locator value {locatorValue} using locator type {locatorByType}")
print_stack()
self.takeScreenshot(locatorType)
assert False
return webElement
def clickOnElement(self, locatorValue, locatorType="id"):
try:
locatorType = locatorType.lower()
webElement = self.waitForElement(locatorValue, locatorType)
webElement.click()
self.log.info(f"Click On Web Element with locator value {locatorValue} using locator type {locatorType}")
except:
self.log.error(
f"Unable to Click On Element with locator value {locatorValue} using locator type {locatorType}")
print_stack()
assert False
def sendText(self, text, locatorValue, locatorType="id"):
try:
locatorType = locatorType.lower()
webElement = self.waitForElement(locatorValue, locatorType)
webElement.send_keys(text)
self.log.info(
f"Send the text {text} in Web Element with locator value {locatorValue} using locator type {locatorType}")
except:
            self.log.error(
                f"Unable to send the text {text} in Web Element with locator value {locatorValue} using locator type {locatorType}")
print_stack()
self.takeScreenshot(locatorType)
assert False
def getText(self, locatorValue, locatorType="id"):
elementText = None
try:
locatorType = locatorType.lower()
webElement = self.waitForElement(locatorValue, locatorType)
elementText = webElement.text
self.log.info(
f"Got the text {elementText} in Web Element with locator value {locatorValue} using locator type {locatorType}")
except:
self.log.info(
f"Unable to get the text {elementText} in Web Element with locator value {locatorValue} using locator type {locatorType}")
print_stack()
return elementText
def isElementDisplayed(self, locatorValue, locatorType="id"):
elementDisplayed = None
try:
locatorType = locatorType.lower()
webElement = self.waitForElement(locatorValue, locatorType)
elementDisplayed = webElement.is_displayed()
self.log.info(
f" Web Element is Displayed web page with locator value {locatorValue} using locator type {locatorType}")
except:
self.log.info(
f" Web Element is Not Displayed web page with locator value {locatorValue} using locator type {locatorType}")
print_stack()
return elementDisplayed
def scrollTo(self, locatorValue, locatorType="id"):
actions = ActionChains(self.driver)
try:
locatorType = locatorType.lower()
webElement = self.waitForElement(locatorValue, locatorType)
actions.move_to_element(webElement).perform()
self.log.info(
f"Scrolled to WebElement with locator value {locatorValue} using locator type {locatorType}")
except:
self.log.info(
f"Unable to Scroll to WebElement with locator value {locatorValue} using locator type {locatorType}")
print_stack()
def takeScreenshot(self, text):
allure.attach(self.driver.get_screenshot_as_png(), name=text, attachment_type=AttachmentType.PNG)
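# Hedged usage sketch, not part of the original framework: it assumes a local Chrome
# driver and a login page like https://www.saucedemo.com; the URL, expected title and
# element ids below are illustrative placeholders, not values taken from this project.
def _example_base_page_usage():
    from selenium import webdriver
    driver = webdriver.Chrome()
    page = BaseClass(driver)
    page.launchWebPage("https://www.saucedemo.com/", "Swag Labs")
    page.sendText("standard_user", "user-name", "id")
    page.sendText("secret_sauce", "password", "id")
    page.clickOnElement("login-button", "id")
    if page.isElementDisplayed("inventory_container", "id"):
        page.scrollTo("inventory_container", "id")
    driver.quit()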
| sudeepyadav5/SeleniumA2Z | SeleniumFrameWork/basepage/BasePage.py | BasePage.py | py | 6,476 | python | en | code | 0 | github-code | 6 |